diff --git a/.github/actions/awk-matcher.json b/.github/actions/awk-matcher.json deleted file mode 100644 index 852a723577..0000000000 --- a/.github/actions/awk-matcher.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "problemMatcher": [ - { - "owner": "awk", - "pattern": [ - { - "regexp": "^([^:]+):([^ ]+) (([^:]+):.*)$", - "file": 1, "line": 2, "message": 3, "severity": 4 - } - ] - } - ] -} diff --git a/.github/actions/k3d/action.yaml b/.github/actions/k3d/action.yaml index 395d5f1116..b6e6ed5c2b 100644 --- a/.github/actions/k3d/action.yaml +++ b/.github/actions/k3d/action.yaml @@ -16,7 +16,7 @@ inputs: description: > Each line is the name of an image to fetch onto all Kubernetes nodes prefetch-timeout: - default: 90s + default: 3m required: true description: > Amount of time to wait for images to be fetched diff --git a/.github/actions/trivy/action.yaml b/.github/actions/trivy/action.yaml new file mode 100644 index 0000000000..bcc67421cb --- /dev/null +++ b/.github/actions/trivy/action.yaml @@ -0,0 +1,138 @@ +# Copyright 2024 - 2025 Crunchy Data Solutions, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# schema documentation: https://docs.github.com/actions/sharing-automations/creating-actions/metadata-syntax-for-github-actions +# yaml-language-server: $schema=https://json.schemastore.org/github-action.json + +name: Trivy +description: Scan this project using Trivy + +# The Trivy team maintains an action, but it has trouble caching its vulnerability data: +# https://github.com/aquasecurity/trivy-action/issues/389 +# +# 1. It caches vulnerability data once per calendar day, despite Trivy wanting +# to download more frequently than that. +# 2. When it fails to download the data, it fails the workflow *and* caches +# the incomplete data. +# 3. When (1) and (2) coincide, every following run that day *must* update the data, +# producing more opportunities for (2) and more failed workflows. +# +# The action below uses any recent cache matching `cache-prefix` and calculates a cache key +# derived from the data Trivy downloads. An older database is better than no scans at all. +# When a run successfully updates the data, that data is cached and available to other runs. + +inputs: + cache: + default: restore,success,use + description: >- + What Trivy data to cache; one or more of restore, save, success, or use. + The value "use" instructs Trivy to read and write to its cache. + The value "restore" loads the Trivy cache from GitHub. + The value "success" saves the Trivy cache to GitHub when Trivy succeeds. + The value "save" saves the Trivy cache to GitHub regardless of Trivy. + + database: + default: update + description: >- + How Trivy should handle its data; one of update or skip. + The value "skip" fetches no Trivy data at all. + + setup: + default: v0.65.0,cache + description: >- + How to install Trivy; one or more of version, none, or cache. + The value "none" does not install Trivy at all. + + cache-directory: + default: ${{ github.workspace }}/.cache/trivy + description: >- + Directory where Trivy should store its data + + cache-prefix: + default: cache-trivy + description: >- + Name (key) where Trivy data should be stored in the GitHub cache + + scan-target: + default: . + description: >- + What Trivy should scan + + scan-type: + default: repository + description: >- + How Trivy should interpret scan-target; one of filesystem, image, repository, or sbom. + +runs: + using: composite + steps: + # Parse list inputs as separated by commas and spaces. 
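+    # For example, an `inputs.cache` of "restore,success,use" is emitted to
+    # GITHUB_OUTPUT as the line: cache=["restore","success","use"]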
+ # Select the maximum version-looking string from `inputs.setup`. + - id: parsed + shell: bash + run: | + # Validate inputs + ( + <<< '${{ inputs.cache }}' jq -rRsS '"cache=\(split("[,\\s]+"; "") - [""])"' + <<< '${{ inputs.setup }}' jq -rRsS ' + "setup=\(split("[,\\s]+"; "") - [""])", + "version=\(split("[,\\s]+"; "") | max_by(split("[v.]"; "") | map(tonumber?)))" + ' + ) | tee --append "${GITHUB_OUTPUT}" + + # Install Trivy as requested. + # NOTE: `setup-trivy` can download a "latest" version but cannot cache it. + - if: ${{ ! contains(fromJSON(steps.parsed.outputs.setup), 'none') }} + uses: aquasecurity/setup-trivy@v0.2.4 + with: + cache: ${{ contains(fromJSON(steps.parsed.outputs.setup), 'cache') }} + version: ${{ steps.parsed.outputs.version }} + + # Restore a recent cache beginning with the prefix. + - id: restore + if: ${{ contains(fromJSON(steps.parsed.outputs.cache), 'restore') }} + uses: actions/cache/restore@v4 + with: + path: ${{ inputs.cache-directory }} + key: ${{ inputs.cache-prefix }}- + + - id: trivy + shell: bash + env: + TRIVY_CACHE_DIR: >- + ${{ contains(fromJSON(steps.parsed.outputs.cache), 'use') && inputs.cache-directory || '' }} + TRIVY_SKIP_CHECK_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_DB_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_JAVA_DB_UPDATE: ${{ inputs.database == 'skip' }} + TRIVY_SKIP_VEX_REPO_UPDATE: ${{ inputs.database == 'skip' }} + run: | + # Run Trivy + trivy '${{ inputs.scan-type }}' '${{ inputs.scan-target }}' || result=$? + + checksum=$([[ -z "${TRIVY_CACHE_DIR}" ]] || cat "${TRIVY_CACHE_DIR}/"*/metadata.json | sha256sum) + echo 'cache-key=${{ inputs.cache-prefix }}-'"${checksum%% *}" >> "${GITHUB_OUTPUT}" + + exit "${result-0}" + + # Save updated data to the cache when requested. + - if: >- + ${{ + steps.restore.outcome == 'success' && + steps.restore.outputs.cache-matched-key == steps.trivy.outputs.cache-key + }} + shell: bash + run: | + # Cache hit on ${{ steps.restore.outputs.cache-matched-key }} + - if: >- + ${{ + steps.restore.outputs.cache-matched-key != steps.trivy.outputs.cache-key && + ( + (contains(fromJSON(steps.parsed.outputs.cache), 'save') && !cancelled()) || + (contains(fromJSON(steps.parsed.outputs.cache), 'success') && success()) + ) + }} + uses: actions/cache/save@v4 + with: + key: ${{ steps.trivy.outputs.cache-key }} + path: ${{ inputs.cache-directory }} diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 639a059edc..8a16fc8d6f 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,16 +1,63 @@ -# https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file -# https://docs.github.com/code-security/dependabot/dependabot-version-updates/customizing-dependency-updates +# Copyright 2024 - 2025 Crunchy Data Solutions, Inc. # -# See: https://www.github.com/dependabot/dependabot-core/issues/4605 ---- +# SPDX-License-Identifier: Apache-2.0 +# +# documentation: https://docs.github.com/code-security/dependabot/dependabot-version-updates +# schema documentation: https://docs.github.com/code-security/dependabot/working-with-dependabot/dependabot-options-reference # yaml-language-server: $schema=https://json.schemastore.org/dependabot-2.0.json +# +# Dependabot allows only one schedule per package-ecosystem, directory, and target-branch. +# Configurations that lack a "target-branch" field also affect security updates. 
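+# (Both entries below omit "target-branch", so the "applies-to: security-updates"
+# groups they define also apply to the security pull requests Dependabot raises.)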
+# +# There is a hack to have *two* schedules: https://github.com/dependabot/dependabot-core/issues/1778#issuecomment-1988140219 +--- version: 2 + updates: - package-ecosystem: github-actions - directory: / + directories: + # "/" is a special case that includes ".github/workflows/*" + - '/' + - '.github/actions/*' + registries: '*' schedule: interval: weekly day: tuesday + labels: + - dependencies groups: - all-github-actions: + # Group security updates into one pull request + action-vulnerabilities: + applies-to: security-updates + patterns: ['*'] + + # Group version updates into one pull request + github-actions: + applies-to: version-updates + patterns: ['*'] + + - package-ecosystem: gomod + directory: '/' + registries: '*' + schedule: + interval: weekly + day: wednesday + labels: + - dependencies + groups: + # Group security updates into one pull request + go-vulnerabilities: + applies-to: security-updates + patterns: ['*'] + + # Group Kubernetes and OpenTelemetry version updates into separate pull requests + kubernetes: + patterns: ['k8s.io/*', 'sigs.k8s.io/*'] + opentelemetry: + patterns: ['go.opentelemetry.io/*'] + go-dependencies: patterns: ['*'] + exclude-patterns: + - 'k8s.io/*' + - 'sigs.k8s.io/*' + - 'go.opentelemetry.io/*' diff --git a/.github/workflows/codeql-analysis.yaml b/.github/workflows/codeql-analysis.yaml index 4697a8b0aa..0c268c33a8 100644 --- a/.github/workflows/codeql-analysis.yaml +++ b/.github/workflows/codeql-analysis.yaml @@ -1,37 +1,37 @@ +# https://codeql.github.com name: CodeQL on: pull_request: branches: - - master + - REL_5_7 push: branches: - - master + - REL_5_7 schedule: - cron: '10 18 * * 2' jobs: analyze: - runs-on: ubuntu-latest + if: ${{ github.repository == 'CrunchyData/postgres-operator' }} permissions: actions: read contents: read security-events: write - if: ${{ github.repository == 'CrunchyData/postgres-operator' }} - + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - name: Initialize CodeQL - uses: github/codeql-action/init@v3 + uses: github/codeql-action/init@v4 with: { languages: go } - name: Autobuild # This action calls `make` which runs our "help" target. - uses: github/codeql-action/autobuild@v3 + uses: github/codeql-action/autobuild@v4 - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v3 + uses: github/codeql-action/analyze@v4 diff --git a/.github/workflows/govulncheck.yaml b/.github/workflows/govulncheck.yaml new file mode 100644 index 0000000000..6684f0c937 --- /dev/null +++ b/.github/workflows/govulncheck.yaml @@ -0,0 +1,50 @@ +# https://go.dev/security/vuln +name: govulncheck + +on: + pull_request: + branches: + - REL_5_7 + push: + branches: + - REL_5_7 + +env: + # Use the Go toolchain installed by setup-go + # https://github.com/actions/setup-go/issues/457 + # + # TODO(govulncheck): Remove when "golang/govulncheck-action" uses "actions/setup-go" v6 or newer + GOTOOLCHAIN: local + +jobs: + vulnerabilities: + if: ${{ github.repository == 'CrunchyData/postgres-operator' }} + permissions: + security-events: write + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Install Go and produce a SARIF report. This fails only when the tool is + # unable to scan. + - name: Prepare report + uses: golang/govulncheck-action@v1 + with: + output-file: 'govulncheck-results.sarif' + output-format: 'sarif' + repo-checkout: false + + # Submit the SARIF report to GitHub code scanning. 
Pull request checks + # succeed or fail according to branch protection rules. + # - https://docs.github.com/en/code-security/code-scanning + - name: Upload results to GitHub + uses: github/codeql-action/upload-sarif@v4 + with: + sarif_file: 'govulncheck-results.sarif' + + # Print any detected vulnerabilities to the workflow log. This step fails + # when the tool detects a vulnerability in code that is called. + # - https://go.dev/blog/govulncheck + - name: Log results + run: govulncheck --format text --show verbose ./... diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml index af302e7638..c6c90b7662 100644 --- a/.github/workflows/lint.yaml +++ b/.github/workflows/lint.yaml @@ -3,20 +3,20 @@ name: Linters on: pull_request: branches: - - master + - REL_5_7 jobs: golangci-lint: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 permissions: contents: read checks: write steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - - uses: golangci/golangci-lint-action@v6 + - uses: golangci/golangci-lint-action@v9 with: version: latest args: --timeout=5m @@ -25,12 +25,8 @@ jobs: # exits zero to ensure it does not fail the pull request check. - name: Count non-blocking issues run: | - golangci-lint run --config .golangci.next.yaml \ - --issues-exit-code 0 \ - --max-issues-per-linter 0 \ - --max-same-issues 0 \ - --out-format json | - jq --sort-keys 'reduce .Issues[] as $i ({}; .[$i.FromLinter] += 1)' | - awk >> "${GITHUB_STEP_SUMMARY}" ' - NR == 1 { print "```json" } { print } END { if (NR > 0) print "```" } - ' || true + golangci-lint run --config .golangci.next.yaml --show-stats >> "${GITHUB_STEP_SUMMARY}" \ + --max-issues-per-linter=0 \ + --max-same-issues=0 \ + --uniq-by-line=0 \ + --output.text.path=/dev/null ||: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index aef10d7694..41a7b91ac0 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -3,45 +3,46 @@ name: Tests on: pull_request: branches: - - master + - REL_5_7 push: branches: - - master + - REL_5_7 jobs: go-test: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 - with: { go-version: stable } - - run: make check - - run: make check-generate + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 + with: { go-version: 1.24 } - name: Ensure go.mod is tidy - run: go mod tidy && git diff --exit-code -- go.mod + run: go mod tidy --diff + - name: Ensure generated files are committed + run: make check-generate + - run: make check kubernetes-api: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [go-test] strategy: fail-fast: false matrix: kubernetes: ['default'] steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } + - run: go mod download - run: ENVTEST_K8S_VERSION="${KUBERNETES#default}" make check-envtest env: KUBERNETES: "${{ matrix.kubernetes }}" - GOEXPERIMENT: nocoverageredesign # https://go.dev/issue/65653 GO_TEST: go test --coverprofile 'envtest.coverage' --coverpkg ./internal/... 
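+          # (--coverpkg attributes coverage to every package under ./internal,
+          # not only the packages exercised by the tests that ran)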
# Upload coverage to GitHub - run: gzip envtest.coverage - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: "~coverage~kubernetes-api=${{ matrix.kubernetes }}" path: envtest.coverage.gz @@ -49,15 +50,15 @@ jobs: kubernetes-k3d: if: "${{ github.repository == 'CrunchyData/postgres-operator' }}" - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [v1.30, v1.25] + kubernetes: [v1.28, v1.34] steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - name: Start k3s @@ -65,60 +66,59 @@ jobs: with: k3s-channel: "${{ matrix.kubernetes }}" prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-1 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-1 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547 - run: make createnamespaces check-envtest-existing env: PGO_TEST_TIMEOUT_SCALE: 1.2 - GOEXPERIMENT: nocoverageredesign # https://go.dev/issue/65653 GO_TEST: go test --coverprofile 'envtest-existing.coverage' --coverpkg ./internal/... # Upload coverage to GitHub - run: gzip envtest-existing.coverage - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: "~coverage~kubernetes-k3d=${{ matrix.kubernetes }}" path: envtest-existing.coverage.gz retention-days: 1 - kuttl-k3d: - runs-on: ubuntu-latest + e2e-k3d-kuttl: + runs-on: ubuntu-24.04 needs: [go-test] strategy: fail-fast: false matrix: - kubernetes: [v1.29, v1.28, v1.27, v1.26, v1.25] + kubernetes: [v1.28, v1.34] steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - name: Start k3s uses: ./.github/actions/k3d with: k3s-channel: "${{ matrix.kubernetes }}" + prefetch-timeout: 5m prefetch-images: | - registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-26 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-1 - registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest - registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-1 - registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-1 - - run: go mod download - - name: Build executable - run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator + registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.7-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547 + 
registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.18.1-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-18.1-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.8-2547 + registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-2547 - name: Get pgMonitor files. run: make get-pgmonitor env: PGMONITOR_DIR: "${{ github.workspace }}/hack/tools/pgmonitor" - QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + QUERIES_CONFIG_DIR: "${{ github.workspace }}/hack/tools/queries" + + - run: go mod download + - name: Build executable + run: PGO_VERSION='${{ github.sha }}' make build-postgres-operator # Start a Docker container with the working directory mounted. - name: Start PGO @@ -130,53 +130,47 @@ jobs: --volume "$(pwd):/mnt" --workdir '/mnt' --env 'PATH=/mnt/bin' \ --env 'QUERIES_CONFIG_DIR=/mnt/hack/tools/queries' \ --env 'KUBECONFIG=hack/.kube/postgres-operator/pgo' \ - --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-26' \ - --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-1' \ - --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-1' \ - --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest' \ - --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest' \ - --env 'RELATED_IMAGE_POSTGRES_15=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-1' \ - --env 'RELATED_IMAGE_POSTGRES_15_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-1' \ - --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.3=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-1' \ - --env 'RELATED_IMAGE_POSTGRES_16_GIS_3.4=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-1' \ - --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.6-1' \ + --env 'RELATED_IMAGE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-2547' \ + --env 'RELATED_IMAGE_PGBACKREST=registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi9-2.56.0-2547' \ + --env 'RELATED_IMAGE_PGBOUNCER=registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi9-1.24-2547' \ + --env 'RELATED_IMAGE_PGEXPORTER=registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi9-0.18.1-2547' \ + --env 'RELATED_IMAGE_PGUPGRADE=registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi9-18.1-2547' \ + --env 'RELATED_IMAGE_POSTGRES_16=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547' \ + --env 'RELATED_IMAGE_POSTGRES_17=registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-17.7-2547' \ + --env 'RELATED_IMAGE_STANDALONE_PGADMIN=registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi9-9.8-2547' \ + --env 'RELATED_IMAGE_COLLECTOR=registry.developers.crunchydata.com/crunchydata/postgres-operator:ubi9-5.8.5-0' \ --env 'PGO_FEATURE_GATES=TablespaceVolumes=true' \ --name 'postgres-operator' ubuntu \ postgres-operator - - name: Install kuttl - run: | - curl -Lo /usr/local/bin/kubectl-kuttl 
https://github.com/kudobuilder/kuttl/releases/download/v0.13.0/kubectl-kuttl_0.13.0_linux_x86_64 - chmod +x /usr/local/bin/kubectl-kuttl - run: make generate-kuttl env: - KUTTL_PG_UPGRADE_FROM_VERSION: '15' - KUTTL_PG_UPGRADE_TO_VERSION: '16' - KUTTL_PG_VERSION: '15' + KUTTL_PG_UPGRADE_FROM_VERSION: '16' + KUTTL_PG_UPGRADE_TO_VERSION: '17' + KUTTL_PG_VERSION: '16' KUTTL_POSTGIS_VERSION: '3.4' - KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1' + KUTTL_PSQL_IMAGE: 'registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi9-16.11-2547' - run: | make check-kuttl && exit failed=$? echo '::group::PGO logs'; docker logs 'postgres-operator'; echo '::endgroup::' exit $failed - env: - KUTTL_TEST: kubectl-kuttl test + - name: Stop PGO run: docker stop 'postgres-operator' || true coverage-report: if: ${{ success() || contains(needs.*.result, 'success') }} - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 needs: - kubernetes-api - kubernetes-k3d + - e2e-k3d-kuttl steps: - - uses: actions/checkout@v4 - - uses: actions/setup-go@v5 + - uses: actions/checkout@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - - uses: actions/download-artifact@v4 + - uses: actions/download-artifact@v6 with: { path: download } # Combine the coverage profiles by taking the mode line from any one file @@ -200,7 +194,7 @@ jobs: # Upload coverage to GitHub - run: gzip total-coverage.html - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v5 with: name: coverage-report=html path: total-coverage.html.gz diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml index e10eed3aae..f8d7734c6f 100644 --- a/.github/workflows/trivy.yaml +++ b/.github/workflows/trivy.yaml @@ -1,88 +1,127 @@ +# https://aquasecurity.github.io/trivy name: Trivy on: pull_request: branches: - - master + - REL_5_7 push: branches: - - master + - REL_5_7 + +env: + # Use the committed Trivy configuration files. + TRIVY_IGNOREFILE: .trivyignore.yaml + TRIVY_SECRET_CONFIG: trivy-secret.yaml jobs: + cache: + # Run only one of these jobs at a time across the entire project. + concurrency: { group: trivy-cache } + # Do not fail this workflow when this job fails. + continue-on-error: true + + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + - name: Download Trivy + uses: ./.github/actions/trivy + env: + TRIVY_DEBUG: true + TRIVY_DOWNLOAD_DB_ONLY: true + TRIVY_NO_PROGRESS: true + TRIVY_SCANNERS: license,secret,vuln + with: + cache: restore,success,use + database: update + licenses: - runs-on: ubuntu-latest + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ !cancelled() }} + + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 # Trivy needs a populated Go module cache to detect Go module licenses. - - uses: actions/setup-go@v5 + - uses: actions/setup-go@v6 with: { go-version: stable } - run: go mod download - # Login to the GitHub Packages registry to avoid rate limiting. 
- # - https://aquasecurity.github.io/trivy/v0.55/docs/references/troubleshooting/#github-rate-limiting - # - https://github.com/aquasecurity/trivy/issues/7580 - # - https://github.com/aquasecurity/trivy-action/issues/389 - # - https://docs.github.com/en/packages/working-with-a-github-packages-registry/working-with-the-container-registry - # - https://docs.github.com/en/packages/managing-github-packages-using-github-actions-workflows/publishing-and-installing-a-package-with-github-actions - - name: Login to GitHub Packages - run: > - docker login ghcr.io - --username '${{ github.actor }}' - --password-stdin <<< '${{ secrets.GITHUB_TOKEN }}' - - # Report success only when detected licenses are listed in [/trivy.yaml]. - # The "aquasecurity/trivy-action" action cannot access the Go module cache, - # so run Trivy from an image with the cache and local configuration mounted. - # - https://github.com/aquasecurity/trivy-action/issues/219 - # - https://github.com/aquasecurity/trivy/pkgs/container/trivy + # Report success only when detected licenses are listed in [.trivyignore.yaml]. - name: Scan licenses - run: > - docker run - --env 'DOCKER_CONFIG=/docker' --volume "${HOME}/.docker:/docker" - --env 'GOPATH=/go' --volume "$(go env GOPATH):/go" - --workdir '/mnt' --volume "$(pwd):/mnt" - 'ghcr.io/aquasecurity/trivy:latest' - filesystem --debug --exit-code=1 --scanners=license . + uses: ./.github/actions/trivy + env: + TRIVY_DEBUG: true + TRIVY_EXIT_CODE: 1 + TRIVY_SCANNERS: license + with: + cache: restore,use + database: skip - vulnerabilities: - if: ${{ github.repository == 'CrunchyData/postgres-operator' }} + secrets: + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ !cancelled() }} - permissions: - # for github/codeql-action/upload-sarif to upload SARIF results - security-events: write + runs-on: ubuntu-24.04 + steps: + - uses: actions/checkout@v5 + + # Report success only when detected secrets are listed in [.trivyignore.yaml]. + - name: Scan secrets + uses: ./.github/actions/trivy + env: + TRIVY_EXIT_CODE: 1 + TRIVY_SCANNERS: secret + with: + cache: restore,use + database: skip - runs-on: ubuntu-latest + vulnerabilities: + # Run this job after the cache job regardless of its success or failure. + needs: [cache] + if: >- + ${{ github.repository == 'CrunchyData/postgres-operator' && !cancelled() }} + permissions: + security-events: write + runs-on: ubuntu-24.04 steps: - - uses: actions/checkout@v4 - - # Run trivy and log detected and fixed vulnerabilities - # This report should match the uploaded code scan report below - # and is a convenience/redundant effort for those who prefer to - # read logs and/or if anything goes wrong with the upload. - - name: Log all detected vulnerabilities - uses: aquasecurity/trivy-action@master + - uses: actions/checkout@v5 + + # Print any detected secrets or vulnerabilities to the workflow log for + # human consumption. This step fails only when Trivy is unable to scan. + # A later step uploads results to GitHub as a pull request check. + - name: Log detected vulnerabilities + uses: ./.github/actions/trivy + env: + TRIVY_SCANNERS: secret,vuln with: - scan-type: filesystem - hide-progress: true - ignore-unfixed: true - scanners: secret,vuln - - # Upload actionable results to the GitHub Security tab. - # Pull request checks fail according to repository settings. 
- # - https://docs.github.com/en/code-security/code-scanning/integrating-with-code-scanning/uploading-a-sarif-file-to-github - # - https://docs.github.com/en/code-security/code-scanning/automatically-scanning-your-code-for-vulnerabilities-and-errors/configuring-code-scanning + cache: restore,use + database: skip + + # Produce a SARIF report of actionable results. This step fails only when + # Trivy is unable to scan. - name: Report actionable vulnerabilities - uses: aquasecurity/trivy-action@master + uses: ./.github/actions/trivy + env: + TRIVY_IGNORE_UNFIXED: true + TRIVY_FORMAT: 'sarif' + TRIVY_OUTPUT: 'trivy-results.sarif' + TRIVY_SCANNERS: secret,vuln with: - scan-type: filesystem - ignore-unfixed: true - format: 'sarif' - output: 'trivy-results.sarif' - scanners: secret,vuln - - - name: Upload Trivy scan results to GitHub Security tab - uses: github/codeql-action/upload-sarif@v3 + cache: use + database: skip + setup: none + + # Submit the SARIF report to GitHub code scanning. Pull requests checks + # succeed or fail according to branch protection rules. + # - https://docs.github.com/en/code-security/code-scanning + - name: Upload results to GitHub + uses: github/codeql-action/upload-sarif@v4 with: sarif_file: 'trivy-results.sarif' diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000..6d641a6350 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,221 @@ +# Copyright Crunchy Data Solutions, Inc. All rights reserved. +# +# schema-documentation: https://docs.gitlab.com/ci/yaml +# yaml-language-server: $schema=https://gitlab.com/gitlab-org/gitlab/-/raw/master/app/assets/javascripts/editor/schema/ci.json + +spec: + inputs: + + # https://go.dev/doc/install/source#environment + architectures: + type: array + default: ['amd64','arm64'] + description: > + The CPU architectures on which to run tests + + # https://docs.gitlab.com/ci/yaml#artifactsexpire_in + retention: + type: string + default: 2d # Enough time to find and address MR failures the following day + description: > + How long to keep reports +--- + +# https://docs.gitlab.com/ci/yaml/workflow +workflow: + rules: + - if: >- + ($CI_PIPELINE_SOURCE == "merge_request_event") || + ($CI_PIPELINE_SOURCE == "schedule") || + ($CI_PIPELINE_SOURCE == "web") + +include: + - component: ${CI_SERVER_FQDN}/containers/gitlab/check-directory-secrets@main + inputs: + job-name: must-not-commit-secrets + job-stage: build + trivy-ignore: .trivyignore.yaml + +variables: + # https://docs.gitlab.com/runner/configuration/feature-flags + # Show the duration of individual script items in the job log. + FF_SCRIPT_SECTIONS: 'true' + +# This uses a specific minor version of golangci-lint to ensure new code conforms +# to the rules we set when this release branch was cut. We do not want new rules +# suggesting sweeping changes to our release branches. +# +# NOTE(2025-04): Some versions of golangci-lint eat memory until they are killed by Linux. 
+# > Ops Team: +# > this container was hanging around even after the ci job died +# > `golangci-lint run` was using ~240GB of RAM and caused the system to swap +# +# | | go1.21.13 | go1.22.12 | go1.23.8 | go1.24.2 | +# | golangci-lint@v1.54.2 | typecheck | typecheck | panic | typecheck | +# | golangci-lint@v1.55.2 | typecheck | typecheck | panic | typecheck | +# | golangci-lint@v1.56.2 | killed | killed | panic | typecheck | +# | golangci-lint@v1.57.2 | killed | killed | panic | typecheck | +# | golangci-lint@v1.58.2 | killed | killed | panic | typecheck | +# | golangci-lint@v1.59.1 | killed | killed | panic | typecheck | +# | golangci-lint@v1.60.3 | go1.22.1 | go1.23.0 | pass | typecheck | +# | golangci-lint@v1.61.0 | go1.22.1 | go1.23.0 | pass | typecheck | +# | golangci-lint@v1.62.2 | go1.22.1 | go1.23.0 | pass | pass | +# | golangci-lint@v1.63.4 | go1.22.1 | go1.23.0 | pass | pass | +# | golangci-lint@v1.64.8 | go1.23.0 | go1.23.0 | pass | pass | +golang-lint: + stage: build + needs: [] + tags: ['image=container'] + image: '${CI_REGISTRY}/containers/gitlab/go-toolset-ubi8' + script: + # Help Git understand the file permissions here. + # > fatal: detected dubious ownership in repository + - git config --global --add safe.directory "$(pwd)" + + # Download golangci-lint and log its version. + - |- + TOOL='github.com/golangci/golangci-lint/cmd/golangci-lint@v1.64' + go run "${TOOL}" version + + # Produce a report for the GitLab UI. This only fails when the tool crashes. + - >- + go run "${TOOL}" run + --concurrency 2 + --timeout 5m + --issues-exit-code 0 + --max-issues-per-linter 0 + --max-same-issues 0 + --out-format junit-xml-extended > golangci-lint.junit.xml + + # Fail the job if there are any issues found and print a handful to the log. + - >- + go run "${TOOL}" run + --concurrency 2 + --timeout 5m + --verbose + + # Send the report to GitLab. + artifacts: + expire_in: '$[[ inputs.retention ]]' + reports: + junit: golangci-lint.junit.xml + +# This uses an old version of Go because the generation tools require it. +must-commit-generated: + stage: build + needs: [] + tags: ['image=container'] + image: '${CI_REGISTRY}/containers/gitlab/go-toolset-ubi8' + variables: + GOTOOLCHAIN: go1.23.8+auto + script: + # Help Git understand the file permissions here. + # > fatal: detected dubious ownership in repository + - git config --global --add safe.directory "$(pwd)" + - make check-generate + +# This uses the latest version of Go we have internally. +go-test: + stage: test + needs: + - job: must-commit-generated + tags: ['image=container','cpu=${TARGET_ARCHITECTURE}'] + image: '${CI_REGISTRY}/containers/gitlab/go-toolset-ubi8' + parallel: + matrix: + - TARGET_ARCHITECTURE: $[[ inputs.architectures ]] + script: + # Help Git understand the file permissions here. + # > fatal: detected dubious ownership in repository + - git config --global --add safe.directory "$(pwd)" + + # Tidy the file and fail if it changed. + - go mod tidy && git diff --exit-code -- go.mod + - go mod download + + # Run the fast/unit tests first. Failure here fails the job. + - >- + make check + GO_TEST='go run gotest.tools/gotestsum@latest --' + GOTESTSUM_JUNITFILE="make-check-${TARGET_ARCHITECTURE}.junit.xml" + + # Run the entire test suite using a local Kubernetes API. + - >- + make check-envtest + ENVTEST_K8S_VERSION='1.32' + GO_TEST='go run gotest.tools/gotestsum@latest --' + GOTESTSUM_JUNITFILE="make-check-envtest-${TARGET_ARCHITECTURE}.junit.xml" + + # Send the reports to GitLab. 
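+  # (gotestsum wraps `go test`; with GOTESTSUM_JUNITFILE set, it also writes a
+  # JUnit XML report that GitLab renders in the merge request UI.)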
+ artifacts: + expire_in: '$[[ inputs.retention ]]' + reports: + junit: '*.junit.xml' + +# https://go.dev/blog/govulncheck +govulncheck: + stage: test + needs: [] + rules: + # Run this job during scheduled pipelines and merge requests that change dependencies. + - changes: ['go.mod'] + + tags: ['image=container','cpu=${TARGET_ARCHITECTURE}'] + image: '${CI_REGISTRY}/containers/gitlab/go-toolset-ubi8' + parallel: + matrix: + - TARGET_ARCHITECTURE: $[[ inputs.architectures ]] + script: + # Download govulncheck and log its version. + - |- + TOOL='golang.org/x/vuln/cmd/govulncheck@latest' + go run "${TOOL}" --version + + # Print any detected vulnerabilities to the log. + # This fails the job when it detects a vulnerability in called code. + - go run "${TOOL}" --format text --show verbose ./... + +# https://trivy.dev/latest/ecosystem/cicd +trivy: + stage: test + needs: [] + rules: + # Run this job during scheduled pipelines and merge requests that change dependencies. + - changes: ['go.mod'] + + tags: ['image=container'] + image: '${CI_REGISTRY}/containers/gitlab/go-toolset-ubi8' + script: + # Help Git understand the file permissions here. + # > fatal: detected dubious ownership in repository + - git config --global --add safe.directory "$(pwd)" + + # Download Trivy and log its version. + - |- + VERSION=$(go list -m -f '{{.Version}}' github.com/aquasecurity/trivy@latest) + git clone --config 'advice.detachedHead=no' --depth 1 --branch "${VERSION}" --sparse \ + 'https://github.com/aquasecurity/trivy.git' \ + '.gitlab-remotes/aquasecurity-trivy' + ( + cd '.gitlab-remotes/aquasecurity-trivy' + git sparse-checkout set 'contrib' + bash 'contrib/install.sh' -b "${HOME}/bin" "${VERSION}" + ) + + # Generate a report and fail when there are issues with dependencies. + # Trivy needs a populated Go module cache to detect Go module licenses. + - go mod download + - >- + trivy repository . --exit-code 1 --skip-dirs .gitlab-remotes + --scanners license,vuln + --ignore-unfixed + --no-progress + --format template + --template '@.gitlab-remotes/aquasecurity-trivy/contrib/junit.tpl' + --output 'trivy.junit.xml' + + # Send the report to GitLab. + artifacts: + expire_in: '$[[ inputs.retention ]]' + reports: + junit: 'trivy.junit.xml' diff --git a/.golangci.next.yaml b/.golangci.next.yaml index 95b3f63347..2aa389e841 100644 --- a/.golangci.next.yaml +++ b/.golangci.next.yaml @@ -4,37 +4,95 @@ # Rules that should be enforced immediately belong in [.golangci.yaml]. # # Both files are used by [.github/workflows/lint.yaml]. 
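+#
+# To preview these findings locally, roughly as CI does:
+#   golangci-lint run --config .golangci.next.yaml --max-issues-per-linter=0 --max-same-issues=0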
+version: "2" +# https://golangci-lint.run/usage/linters linters: - disable-all: true - enable: - - contextcheck - - err113 + default: all + disable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - copyloopvar + - depguard + - dupword + - durationcheck - errchkjson - - gocritic - - godot - - godox - - gofumpt - - gosec # exclude-use-default - - nilnil + - errname + - errorlint + - exhaustive + - exptostd + - fatcontext + - forbidigo + - ginkgolinter + - gocheckcompilerdirectives + - gochecksumtype + - goheader + - gomoddirectives + - gomodguard + - goprintffuncname + - gosmopolitan + - grouper + - iface + - importas + - interfacebloat + - intrange + - loggercheck + - makezero + - mirror + - misspell + - musttag + - nilerr + - nilnesserr + - noctx - nolintlint - - predeclared - - revive - - staticcheck # exclude-use-default - - tenv - - thelper - - tparallel + - nosprintfhostport + - prealloc + - promlinter + - protogetter + - reassign + - recvcheck + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - tagalign + - testifylint + - unconvert + - unparam + - usestdlibvars + - usetesting - wastedassign + - wsl + - zerologlint -issues: - # https://github.com/golangci/golangci-lint/issues/2239 - exclude-use-default: false + settings: + thelper: + # https://github.com/kulti/thelper/issues/27 + tb: { begin: true, first: true } + test: { begin: true, first: true, name: true } + + exclusions: + warn-unused: true + # Ignore built-in exclusions + presets: [] + rules: + # We call external linters when they are installed: Flake8, ShellCheck, etc. + - linters: [gosec] + path: '_test[.]go$' + text: 'G204: Subprocess launched with variable' -linters-settings: - errchkjson: - check-error-free-encoding: true +# https://golangci-lint.run/usage/formatters +formatters: + enable: + - gofumpt + +issues: + # Fix only when requested + fix: false - thelper: - # https://github.com/kulti/thelper/issues/27 - tb: { begin: true, first: true } - test: { begin: true, first: true, name: true } + # Show all issues at once + max-issues-per-linter: 0 + max-same-issues: 0 + uniq-by-line: false diff --git a/.golangci.yaml b/.golangci.yaml index 87a6ed0464..99a2ec576b 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -1,87 +1,202 @@ # https://golangci-lint.run/usage/configuration/ +version: "2" +# https://golangci-lint.run/usage/linters linters: - disable: - - contextcheck - - errchkjson - - gci - - gofumpt + default: standard enable: + - asasalint + - asciicheck + - bidichk + - bodyclose + - copyloopvar - depguard + - dupword + - durationcheck + - errchkjson + - errname + - errorlint + - exhaustive + - exptostd + - fatcontext + - forbidigo + - ginkgolinter + - gocheckcompilerdirectives + - gochecksumtype - goheader + - gomoddirectives - gomodguard - - gosimple + - goprintffuncname + - gosec + - gosmopolitan + - grouper + - iface - importas + - interfacebloat + - intrange + - loggercheck + - makezero + - mirror - misspell + - musttag + - nilerr + - nilnesserr + - noctx + - nolintlint + - nosprintfhostport + - prealloc + - promlinter + - protogetter + - reassign + - recvcheck + - rowserrcheck + - sloglint + - spancheck + - sqlclosecheck + - tagalign + - testifylint - unconvert - presets: - - bugs - - format - - unused + - unparam + - usestdlibvars + - usetesting + - wastedassign + - zerologlint + + settings: + dupword: + ignore: + # We might see duplicate instances of 'fi' if we end two bash 'if' statements + - fi + # We use "local all all peer" in some hba tests + - all + + depguard: + rules: + 
everything: + files: ['$all'] + list-mode: lax + allow: + - go.opentelemetry.io/otel/semconv/v1.27.0 + deny: + - pkg: go.opentelemetry.io/otel/semconv + desc: Use "go.opentelemetry.io/otel/semconv/v1.27.0" instead. + - pkg: io/ioutil + desc: Use the "io" and "os" packages instead. See https://go.dev/doc/go1.16#ioutil + - pkg: math/rand$ + desc: Use the "math/rand/v2" package instead. See https://go.dev/doc/go1.22#math_rand_v2 + not-tests: + files: ['!$test','!**/internal/testing/**'] + list-mode: lax + deny: + - pkg: net/http/httptest + desc: Should be used only in tests. + - pkg: testing/* + desc: The "testing" packages should be used only in tests. + - pkg: github.com/crunchydata/postgres-operator/internal/crd/* + desc: The "internal/crd" packages should be used only in tests. + - pkg: github.com/crunchydata/postgres-operator/internal/testing/* + desc: The "internal/testing" packages should be used only in tests. + + tests: + files: ['$test'] + list-mode: lax + deny: + - pkg: github.com/pkg/errors + desc: Use the "errors" package unless you are interacting with stack traces. + + errchkjson: + check-error-free-encoding: true + + goheader: + template: |- + Copyright {{ DATES }} Crunchy Data Solutions, Inc. + + SPDX-License-Identifier: Apache-2.0 + values: + regexp: + DATES: ((201[7-9]|202[0-4]) - 2025|2025) + + gomodguard: + blocked: + modules: + - go.yaml.in/yaml/v2: { recommendations: [sigs.k8s.io/yaml] } + - go.yaml.in/yaml/v3: { recommendations: [sigs.k8s.io/yaml] } + - gopkg.in/yaml.v2: { recommendations: [sigs.k8s.io/yaml] } + - gopkg.in/yaml.v3: { recommendations: [sigs.k8s.io/yaml] } + - gotest.tools: { recommendations: [gotest.tools/v3] } + - k8s.io/kubernetes: + reason: k8s.io/kubernetes is for building kubelet, kubeadm, etc. + + importas: + no-unaliased: true + alias: + - pkg: k8s.io/api/(\w+)/(v[\w\w]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+) + alias: $1$2 + - pkg: k8s.io/apimachinery/pkg/api/errors + alias: apierrors -linters-settings: - depguard: + spancheck: + checks: [end, record-error] + extra-start-span-signatures: + - github.com/crunchydata/postgres-operator/internal/tracing.Start:opentelemetry + ignore-check-signatures: + - tracing.Escape + + exclusions: + warn-unused: true + presets: + - common-false-positives + - legacy + - std-error-handling rules: - everything: - deny: - - pkg: io/ioutil - desc: > - Use the "io" and "os" packages instead. - See https://go.dev/doc/go1.16#ioutil - - not-tests: - files: ['!$test'] - deny: - - pkg: net/http/httptest - desc: Should be used only in tests. - - - pkg: testing/* - desc: The "testing" packages should be used only in tests. - - - pkg: github.com/crunchydata/postgres-operator/internal/testing/* - desc: The "internal/testing" packages should be used only in tests. - - exhaustive: - default-signifies-exhaustive: true - - goheader: - template: |- - Copyright {{ DATES }} Crunchy Data Solutions, Inc. - - SPDX-License-Identifier: Apache-2.0 - values: - regexp: - DATES: '((201[7-9]|202[0-3]) - 2024|2024)' - - goimports: - local-prefixes: github.com/crunchydata/postgres-operator - - gomodguard: - blocked: - modules: - - gopkg.in/yaml.v2: { recommendations: [sigs.k8s.io/yaml] } - - gopkg.in/yaml.v3: { recommendations: [sigs.k8s.io/yaml] } - - gotest.tools: { recommendations: [gotest.tools/v3] } - - k8s.io/kubernetes: - reason: > - k8s.io/kubernetes is for managing dependencies of the Kubernetes - project, i.e. building kubelet and kubeadm. 
- - gosec: - excludes: - # Flags for potentially-unsafe casting of ints, similar problem to globally-disabled G103 - - G115 - - importas: - alias: - - pkg: k8s.io/api/(\w+)/(v[\w\w]+) - alias: $1$2 - - pkg: k8s.io/apimachinery/pkg/apis/(\w+)/(v[\w\d]+) - alias: $1$2 - - pkg: k8s.io/apimachinery/pkg/api/errors - alias: apierrors - no-unaliased: true + # It is fine for tests to use "math/rand" packages. + - linters: [gosec] + path: '(.+)_test[.]go' + text: weak random number generator + + # This internal package is the one place we want to do API discovery. + - linters: [depguard] + path: internal/kubernetes/discovery.go + text: k8s.io/client-go/discovery + + # Postgres HBA rules often include "all all all" + - linters: [dupword] + path: /(hba|postgres)[^/]+$ + text: words \(all\) found + + # These value types have unmarshal methods. + # https://github.com/raeperd/recvcheck/issues/7 + - linters: [recvcheck] + path: internal/pki/pki.go + text: methods of "(Certificate|PrivateKey)" + + - linters: [staticcheck] + text: corev1.(Endpoints|EndpointSubset) is deprecated + + - linters: [staticcheck] + path: internal/controller/ + text: >- + deprecated: Use `RequeueAfter` instead + +# https://golangci-lint.run/usage/formatters +formatters: + enable: + - gci + - gofmt + settings: + gci: + sections: + - standard + - default + - localmodule issues: - exclude-dirs: - - pkg/generated + # Fix only when requested + fix: false + + # Show all issues at once + max-issues-per-linter: 0 + max-same-issues: 0 + uniq-by-line: false diff --git a/.trivyignore.yaml b/.trivyignore.yaml new file mode 100644 index 0000000000..b275e406fa --- /dev/null +++ b/.trivyignore.yaml @@ -0,0 +1,12 @@ +# Copyright Crunchy Data Solutions, Inc. All rights reserved. +# +# https://trivy.dev/latest/docs/configuration/filtering/#trivyignoreyaml + +secrets: + - id: jwt-token + paths: + - internal/testing/token_* + + - id: private-key + paths: + - internal/pki/*_test.go diff --git a/LICENSE.md b/LICENSE.md index 8d57ad6f2e..3960704149 100644 --- a/LICENSE.md +++ b/LICENSE.md @@ -176,7 +176,7 @@ END OF TERMS AND CONDITIONS - Copyright 2017 - 2024 Crunchy Data Solutions, Inc. + Copyright 2017 - 2025 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/Makefile b/Makefile index 0c5da1d5c2..a493694fcc 100644 --- a/Makefile +++ b/Makefile @@ -6,7 +6,7 @@ PGO_IMAGE_URL ?= https://www.crunchydata.com/products/crunchy-postgresql-for-kub PGO_IMAGE_PREFIX ?= localhost PGMONITOR_DIR ?= hack/tools/pgmonitor -PGMONITOR_VERSION ?= v4.11.0 +PGMONITOR_VERSION ?= v5.1.1 QUERIES_CONFIG_DIR ?= hack/tools/queries EXTERNAL_SNAPSHOTTER_DIR ?= hack/tools/external-snapshotter @@ -18,8 +18,9 @@ BUILDAH_BUILD ?= buildah bud GO ?= go GO_BUILD = $(GO) build GO_TEST ?= $(GO) test -KUTTL ?= kubectl-kuttl +KUTTL ?= $(GO) run github.com/kudobuilder/kuttl/cmd/kubectl-kuttl@latest KUTTL_TEST ?= $(KUTTL) test +ENVTEST_K8S_VERSION ?= 1.34 ##@ General @@ -68,7 +69,6 @@ clean: clean-deprecated rm -f config/rbac/role.yaml rm -rf licenses/*/ [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated - [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other rm -rf build/crd/generated build/crd/*/generated [ ! -f hack/tools/setup-envtest ] || rm hack/tools/setup-envtest [ ! 
-d hack/tools/envtest ] || { chmod -R u+w hack/tools/envtest && rm -r hack/tools/envtest; } @@ -94,6 +94,8 @@ clean-deprecated: ## Clean deprecated resources @# crunchy-postgres-exporter used to live in this repo [ ! -d bin/crunchy-postgres-exporter ] || rm -r bin/crunchy-postgres-exporter [ ! -d build/crunchy-postgres-exporter ] || rm -r build/crunchy-postgres-exporter + @# Old testing directories + [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other ##@ Deployment @@ -133,7 +135,6 @@ deploy-dev: createnamespaces QUERIES_CONFIG_DIR="${QUERIES_CONFIG_DIR}" \ CRUNCHY_DEBUG=true \ PGO_FEATURE_GATES="${PGO_FEATURE_GATES}" \ - CHECK_FOR_UPGRADES='$(if $(CHECK_FOR_UPGRADES),$(CHECK_FOR_UPGRADES),false)' \ KUBECONFIG=hack/.kube/postgres-operator/pgo \ PGO_NAMESPACE='postgres-operator' \ PGO_INSTALLER='deploy-dev' \ @@ -233,11 +234,10 @@ generate-kuttl: export KUTTL_PG_UPGRADE_FROM_VERSION ?= 15 generate-kuttl: export KUTTL_PG_UPGRADE_TO_VERSION ?= 16 generate-kuttl: export KUTTL_PG_VERSION ?= 16 generate-kuttl: export KUTTL_POSTGIS_VERSION ?= 3.4 -generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 +generate-kuttl: export KUTTL_PSQL_IMAGE ?= registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.11-2547 generate-kuttl: export KUTTL_TEST_DELETE_NAMESPACE ?= kuttl-test-delete-namespace generate-kuttl: ## Generate kuttl tests [ ! -d testing/kuttl/e2e-generated ] || rm -r testing/kuttl/e2e-generated - [ ! -d testing/kuttl/e2e-generated-other ] || rm -r testing/kuttl/e2e-generated-other bash -ceu ' \ case $(KUTTL_PG_VERSION) in \ 16 ) export KUTTL_BITNAMI_IMAGE_TAG=16.0.0-debian-11-r3 ;; \ @@ -254,7 +254,7 @@ generate-kuttl: ## Generate kuttl tests source="$${1}" target="$${1/e2e/e2e-generated}"; \ mkdir -p "$${target%/*}"; render < "$${source}" > "$${target}"; \ shift; \ - done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e-other/*/*.yaml testing/kuttl/e2e/*/*/*.yaml testing/kuttl/e2e-other/*/*/*.yaml + done' - testing/kuttl/e2e/*/*.yaml testing/kuttl/e2e/*/*/*.yaml ##@ Generate diff --git a/bin/license_aggregator.sh b/bin/license_aggregator.sh index 66f7284a97..1d044039ec 100755 --- a/bin/license_aggregator.sh +++ b/bin/license_aggregator.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2025 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/cmd/postgres-operator/main.go b/cmd/postgres-operator/main.go index 7e6b2da3d3..a3ae39856f 100644 --- a/cmd/postgres-operator/main.go +++ b/cmd/postgres-operator/main.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -30,8 +30,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/registration" - "github.com/crunchydata/postgres-operator/internal/upgradecheck" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -121,7 +119,7 @@ func initManager() (runtime.Options, error) { func main() { // This context is canceled by SIGINT, SIGTERM, or by calling shutdown. 
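+	// (The explicit shutdown function is gone: the registration runner that
+	// called it is removed in this change, so signal cancellation is enough.)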
- ctx, shutdown := context.WithCancel(runtime.SignalHandler()) + ctx := runtime.SignalHandler() otelFlush, err := initOpenTelemetry() assertNoError(err) @@ -134,7 +132,13 @@ func main() { features := feature.NewGate() assertNoError(features.Set(os.Getenv("PGO_FEATURE_GATES"))) - log.Info("feature gates enabled", "PGO_FEATURE_GATES", features.String()) + + ctx = feature.NewContext(ctx, features) + log.Info("feature gates", + // These are set by the user + "PGO_FEATURE_GATES", feature.ShowAssigned(ctx), + // These are enabled, including features that are on by default + "enabled", feature.ShowEnabled(ctx)) cfg, err := runtime.GetConfig() assertNoError(err) @@ -165,13 +169,8 @@ func main() { log.Info("detected OpenShift environment") } - registrar, err := registration.NewRunner(os.Getenv("RSA_KEY"), os.Getenv("TOKEN_PATH"), shutdown) - assertNoError(err) - assertNoError(mgr.Add(registrar)) - token, _ := registrar.CheckToken() - // add all PostgreSQL Operator controllers to the runtime manager - addControllersToManager(mgr, openshift, log, registrar) + addControllersToManager(mgr, openshift, log) if features.Enabled(feature.BridgeIdentifiers) { constructor := func() *bridge.Client { @@ -183,23 +182,6 @@ func main() { assertNoError(bridge.ManagedInstallationReconciler(mgr, constructor)) } - // Enable upgrade checking - upgradeCheckingDisabled := strings.EqualFold(os.Getenv("CHECK_FOR_UPGRADES"), "false") - if !upgradeCheckingDisabled { - log.Info("upgrade checking enabled") - // get the URL for the check for upgrades endpoint if set in the env - assertNoError( - upgradecheck.ManagedScheduler( - mgr, - openshift, - os.Getenv("CHECK_FOR_UPGRADES_URL"), - versionString, - token, - )) - } else { - log.Info("upgrade checking disabled") - } - // Enable health probes assertNoError(mgr.AddHealthzCheck("health", healthz.Ping)) assertNoError(mgr.AddReadyzCheck("check", healthz.Ping)) @@ -212,14 +194,13 @@ func main() { // addControllersToManager adds all PostgreSQL Operator controllers to the provided controller // runtime manager. 
-func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger, reg registration.Registration) { +func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Logger) { pgReconciler := &postgrescluster.Reconciler{ - Client: mgr.GetClient(), - IsOpenShift: openshift, - Owner: postgrescluster.ControllerName, - Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), - Registration: reg, - Tracer: otel.Tracer(postgrescluster.ControllerName), + Client: mgr.GetClient(), + IsOpenShift: openshift, + Owner: postgrescluster.ControllerName, + Recorder: mgr.GetEventRecorderFor(postgrescluster.ControllerName), + Tracer: otel.Tracer(postgrescluster.ControllerName), } if err := pgReconciler.SetupWithManager(mgr); err != nil { @@ -228,10 +209,9 @@ func addControllersToManager(mgr runtime.Manager, openshift bool, log logging.Lo } upgradeReconciler := &pgupgrade.PGUpgradeReconciler{ - Client: mgr.GetClient(), - Owner: "pgupgrade-controller", - Recorder: mgr.GetEventRecorderFor("pgupgrade-controller"), - Registration: reg, + Client: mgr.GetClient(), + Owner: "pgupgrade-controller", + Recorder: mgr.GetEventRecorderFor("pgupgrade-controller"), } if err := upgradeReconciler.SetupWithManager(mgr); err != nil { diff --git a/cmd/postgres-operator/main_test.go b/cmd/postgres-operator/main_test.go index f369ce6bd3..4bc3531d44 100644 --- a/cmd/postgres-operator/main_test.go +++ b/cmd/postgres-operator/main_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/cmd/postgres-operator/open_telemetry.go b/cmd/postgres-operator/open_telemetry.go index 2c9eedc135..f82d8ccbac 100644 --- a/cmd/postgres-operator/open_telemetry.go +++ b/cmd/postgres-operator/open_telemetry.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/config/README.md b/config/README.md index 00ebaf8833..c29bb9d5bf 100644 --- a/config/README.md +++ b/config/README.md @@ -1,5 +1,5 @@ diff --git a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml index 7174930bd9..acc52d2688 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_crunchybridgeclusters.yaml @@ -156,6 +156,7 @@ spec: - plan - provider - region + - secret - storage type: object status: diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index 4bcdce7f00..b22f8adc17 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -1900,6 +1900,10 @@ spec: description: MajorVersion represents the major version of the running pgAdmin. type: integer + minorVersion: + description: MinorVersion represents the minor version of the running + pgAdmin. + type: string observedGeneration: description: observedGeneration represents the .metadata.generation on which the status was based. 
diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 4f79a80125..9372036f21 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -6451,6 +6451,10 @@ spec: - repo - stanza type: object + x-kubernetes-validations: + - message: Only S3, GCS or Azure repos can be used as a pgBackRest + data source. + rule: '!has(self.repo.volume)' postgresCluster: description: |- Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data @@ -17229,11 +17233,6 @@ spec: type: integer type: object type: object - registrationRequired: - properties: - pgoVersion: - type: string - type: object startupInstance: description: |- The instance that should be started first when bootstrapping and/or starting a @@ -17242,8 +17241,6 @@ spec: startupInstanceSet: description: The instance set associated with the startupInstance type: string - tokenRequired: - type: string userInterface: description: Current state of the PostgreSQL user interface. properties: diff --git a/config/default/kustomization.yaml b/config/default/kustomization.yaml index 82b2310ca0..5bb163b987 100644 --- a/config/default/kustomization.yaml +++ b/config/default/kustomization.yaml @@ -17,4 +17,4 @@ resources: images: - name: postgres-operator newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: latest + newTag: ubi8-5.7.9-0 diff --git a/config/manager/manager.yaml b/config/manager/manager.yaml index 3aa9198676..fb2c4e0ef6 100644 --- a/config/manager/manager.yaml +++ b/config/manager/manager.yaml @@ -23,27 +23,29 @@ spec: - name: CRUNCHY_DEBUG value: "true" - name: RELATED_IMAGE_POSTGRES_15 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.7-1" - - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-15.7-3.3-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-15.15-2547" - name: RELATED_IMAGE_POSTGRES_16 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.11-2547" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.3-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.11-3.3-2547" - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4 - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.3-3.4-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-16.11-3.4-2547" + - name: RELATED_IMAGE_POSTGRES_17 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-17.7-2547" + - name: RELATED_IMAGE_POSTGRES_17_GIS_3.4 + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-gis:ubi8-17.7-3.4-2547" - name: RELATED_IMAGE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-26" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-4.30-2547" - name: RELATED_IMAGE_PGBACKREST - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.56.0-2547" - name: 
RELATED_IMAGE_PGBOUNCER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.24-2547" - name: RELATED_IMAGE_PGEXPORTER - value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:latest" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-1.17.1-2547" - name: RELATED_IMAGE_PGUPGRADE - value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:latest" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-upgrade:ubi8-17.7-2547" - name: RELATED_IMAGE_STANDALONE_PGADMIN - value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-8.6-1" + value: "registry.developers.crunchydata.com/crunchydata/crunchy-pgadmin4:ubi8-9.8-2547" securityContext: allowPrivilegeEscalation: false capabilities: { drop: [ALL] } diff --git a/config/singlenamespace/kustomization.yaml b/config/singlenamespace/kustomization.yaml index a6dc8de538..5dfef5083b 100644 --- a/config/singlenamespace/kustomization.yaml +++ b/config/singlenamespace/kustomization.yaml @@ -16,7 +16,7 @@ resources: images: - name: postgres-operator newName: registry.developers.crunchydata.com/crunchydata/postgres-operator - newTag: latest + newTag: ubi8-5.7.9-0 patches: - path: manager-target.yaml diff --git a/examples/postgrescluster/postgrescluster.yaml b/examples/postgrescluster/postgrescluster.yaml index 7ad4524571..d58a7d2689 100644 --- a/examples/postgrescluster/postgrescluster.yaml +++ b/examples/postgrescluster/postgrescluster.yaml @@ -3,7 +3,7 @@ kind: PostgresCluster metadata: name: hippo spec: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.3-1 + image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-16.11-2547 postgresVersion: 16 instances: - name: instance1 @@ -15,7 +15,7 @@ spec: storage: 1Gi backups: pgbackrest: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.51-1 + image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbackrest:ubi8-2.56.0-2547 repos: - name: repo1 volume: @@ -35,4 +35,4 @@ spec: storage: 1Gi proxy: pgBouncer: - image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.22-1 + image: registry.developers.crunchydata.com/crunchydata/crunchy-pgbouncer:ubi8-1.24-2547 diff --git a/go.mod b/go.mod index 04adda6833..7ea5e074f3 100644 --- a/go.mod +++ b/go.mod @@ -1,14 +1,10 @@ module github.com/crunchydata/postgres-operator -go 1.22.0 - -toolchain go1.22.4 +go 1.24.0 require ( github.com/go-logr/logr v1.4.2 - github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/go-cmp v0.6.0 - github.com/google/uuid v1.6.0 github.com/kubernetes-csi/external-snapshotter/client/v8 v8.0.0 github.com/onsi/ginkgo/v2 v2.17.2 github.com/onsi/gomega v1.33.1 @@ -23,7 +19,7 @@ require ( go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.2.0 go.opentelemetry.io/otel/sdk v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 - golang.org/x/crypto v0.27.0 + golang.org/x/crypto v0.45.0 gotest.tools/v3 v3.1.0 k8s.io/api v0.30.2 k8s.io/apimachinery v0.30.2 @@ -55,6 +51,7 @@ require ( github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/pprof v0.0.0-20240424215950-a892ee059fd6 // indirect + github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect 
github.com/imdario/mergo v0.3.16 // indirect @@ -74,13 +71,13 @@ require ( go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect - golang.org/x/net v0.29.0 // indirect - golang.org/x/oauth2 v0.21.0 // indirect - golang.org/x/sys v0.25.0 // indirect - golang.org/x/term v0.24.0 // indirect - golang.org/x/text v0.18.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect + golang.org/x/tools v0.38.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index aed2056f6f..5a8052bdc6 100644 --- a/go.sum +++ b/go.sum @@ -38,8 +38,6 @@ github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1v github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.2.1 h1:OuVbFODueb089Lh128TAcimifWaLhJwVflnrgM17wHk= -github.com/golang-jwt/jwt/v5 v5.2.1/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -155,8 +153,8 @@ go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.27.0 h1:GXm2NjJrPaiv/h1tb2UH8QfgC/hOf/+z0p6PT8o1w7A= -golang.org/x/crypto v0.27.0/go.mod h1:1Xngt8kV6Dvbssa53Ziq6Eqn0HqbZi5Z6R0ZpwQzt70= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuhGX/+R1ZpfJ4/ia80JM= golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -165,10 +163,10 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.29.0 h1:5ORfpBpCs4HzDYoodCDBbwHzdR5UrLBZ3sOnUJmFoHo= -golang.org/x/net v0.29.0/go.mod h1:gLkgy8jTGERgjzMic6DS9+SP0ajcu6Xu3Orq/SpETg0= 
-golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= -golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -178,15 +176,15 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.25.0 h1:r+8e+loiHxRqhXVl6ML1nO3l1+oFoWbnlu2Ehimmi34= -golang.org/x/sys v0.25.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.24.0 h1:Mh5cbb+Zk2hqqXNO7S1iTjEphVL+jb8ZWaqh/g+JWkM= -golang.org/x/term v0.24.0/go.mod h1:lOBK/LVxemqiMij05LGJ0tzNr8xlmwBRJ81PX6wVLH8= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.18.0 h1:XvMDiNzPAl0jr17s6W9lcaIhGUfUORdGCNsuLmPG224= -golang.org/x/text v0.18.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -194,8 +192,8 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA= -golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/hack/boilerplate.go.txt b/hack/boilerplate.go.txt index 7fc3d63c10..7c662ee243 100644 --- a/hack/boilerplate.go.txt +++ b/hack/boilerplate.go.txt @@ -1,3 +1,3 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/hack/create-kubeconfig.sh b/hack/create-kubeconfig.sh index 3bebcd194e..87aed13291 100755 --- a/hack/create-kubeconfig.sh +++ b/hack/create-kubeconfig.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2025 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/hack/create-todo-patch.sh b/hack/create-todo-patch.sh index 7aab184a3a..b730387747 100755 --- a/hack/create-todo-patch.sh +++ b/hack/create-todo-patch.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2025 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/hack/generate-rbac.sh b/hack/generate-rbac.sh index 4ad430a5e9..1b751035f7 100755 --- a/hack/generate-rbac.sh +++ b/hack/generate-rbac.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +# Copyright 2021 - 2025 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at diff --git a/hack/go-get.sh b/hack/go-get.sh new file mode 100755 index 0000000000..9dbfb96640 --- /dev/null +++ b/hack/go-get.sh @@ -0,0 +1,37 @@ +#!/usr/bin/env bash +# +# Copyright 2025 Crunchy Data Solutions, Inc. +# +# SPDX-License-Identifier: Apache-2.0 +# +# This runs `$GO get` without changing the "go" directive in the "go.mod" file. +# To change that, pass a "go@go{version}" argument. +# +# https://go.dev/doc/toolchain +# +# Usage: $0 help +# Usage: $0 -u golang.org/x/crypto +# Usage: $0 -u golang.org/x/crypto go@go1.99.0 +# + +set -eu +: "${GO:=go}" + +if [[ "$#" -eq 0 ]] || [[ "$1" == 'help' ]] || [[ "$*" == *'--help'* ]] || [[ "$*" == *'--version'* ]] +then + self=$(command -v -- "$0") + content=$(< "${self}") + content="${content%%$'\n\n'*}" + content="#${content#*$'\n#'}" + content="${content//$'$GO'/${GO}}" + exec echo "${content//$'$0'/$0}" +fi + +version=$(${GO} list -m -f 'go@go{{.GoVersion}}') + +for arg in "$@" +do case "${arg}" in go@go*) version="${arg}" ;; *) esac +done + +${GO} get "$@" "${version}" 'toolchain@none' +${GO} mod tidy diff --git a/hack/update-pgmonitor-installer.sh b/hack/update-pgmonitor-installer.sh index 148a4761c9..827614d526 100755 --- a/hack/update-pgmonitor-installer.sh +++ b/hack/update-pgmonitor-installer.sh @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +# Copyright 2022 - 2025 Crunchy Data Solutions, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at diff --git a/installers/olm/.gitignore b/installers/olm/.gitignore deleted file mode 100644 index a2d12b4ff2..0000000000 --- a/installers/olm/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -/bundles/ -/projects/ -/tools/ -/config/marketplace diff --git a/installers/olm/Makefile b/installers/olm/Makefile deleted file mode 100644 index 9784d352cf..0000000000 --- a/installers/olm/Makefile +++ /dev/null @@ -1,112 +0,0 @@ -.DEFAULT_GOAL := help -.SUFFIXES: - -CONTAINER ?= docker -PGO_VERSION ?= latest -REPLACES_VERSION ?= 5.x.y - -OS_KERNEL ?= $(shell bash -c 'echo $${1,,}' - `uname -s`) -OS_MACHINE ?= $(shell bash -c 'echo $${1/x86_/amd}' - `uname -m`) -SYSTEM = $(OS_KERNEL)-$(OS_MACHINE) - -export PATH := $(CURDIR)/tools/$(SYSTEM):$(PATH) - -export PGO_VERSION - -export REPLACES_VERSION - -distros = community redhat marketplace - -.PHONY: bundles -bundles: ## Build OLM bundles -bundles: $(distros:%=bundles/%) - -# https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle -# https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md -.PHONY: bundles/community -bundles/community: - ./generate.sh community - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - env operator-sdk bundle validate $@ --select-optional='name=community' --optional-values='index-path=$@/Dockerfile' - -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/reviewing-your-metadata-bundle -.PHONY: bundles/redhat -bundles/redhat: - ./generate.sh redhat - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -# The 'marketplace' configuration is currently identical to the 'redhat', so we just copy it here. 
-.PHONY: bundles/marketplace -bundles/marketplace: - cp -r ./config/redhat/ ./config/marketplace - ./generate.sh marketplace - env operator-sdk bundle validate $@ --select-optional='suite=operatorframework' - -.PHONY: clean -clean: clean-deprecated -clean: ## Remove generated files and downloaded tools - rm -rf ./bundles ./projects ./tools ./config/marketplace - -.PHONY: clean-deprecated -clean-deprecated: - rm -rf ./package - -.PHONY: help -help: ALIGN=18 -help: ## Print this message - @awk -F ': ## ' -- "/^[^':]+: ## /"' { printf "'$$(tput bold)'%-$(ALIGN)s'$$(tput sgr0)' %s\n", $$1, $$2 }' $(MAKEFILE_LIST) - -.PHONY: install-olm -install-olm: ## Install OLM in Kubernetes - env operator-sdk olm install - -.PHONY: tools -tools: ## Download tools needed to build bundles - -tools: tools/$(SYSTEM)/jq -tools/$(SYSTEM)/jq: - install -d '$(dir $@)' - curl -fSL -o '$@' "https://github.com/jqlang/jq/releases/download/jq-1.6/jq-$$(SYSTEM='$(SYSTEM)'; \ - case "$$SYSTEM" in \ - (linux-*) echo "$${SYSTEM/-amd/}";; (darwin-*) echo "$${SYSTEM/darwin/osx}";; (*) echo '$(SYSTEM)';; \ - esac)" - chmod u+x '$@' - -tools: tools/$(SYSTEM)/kubectl -tools/$(SYSTEM)/kubectl: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://dl.k8s.io/release/$(shell curl -Ls https://dl.k8s.io/release/stable-1.21.txt)/bin/$(OS_KERNEL)/$(OS_MACHINE)/kubectl' - chmod u+x '$@' - -# quay.io/operator-framework/operator-sdk -tools: tools/$(SYSTEM)/operator-sdk -tools/$(SYSTEM)/operator-sdk: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-sdk/releases/download/v1.18.0/operator-sdk_$(OS_KERNEL)_$(OS_MACHINE)' - chmod u+x '$@' - -tools: tools/$(SYSTEM)/opm -tools/$(SYSTEM)/opm: - install -d '$(dir $@)' - curl -fSL -o '$@' 'https://github.com/operator-framework/operator-registry/releases/download/v1.33.0/$(OS_KERNEL)-$(OS_MACHINE)-opm' - chmod u+x '$@' - -tools/$(SYSTEM)/venv: - install -d '$(dir $@)' - python3 -m venv '$@' - -tools: tools/$(SYSTEM)/yq -tools/$(SYSTEM)/yq: | tools/$(SYSTEM)/venv - 'tools/$(SYSTEM)/venv/bin/python' -m pip install yq - cd '$(dir $@)' && ln -s venv/bin/yq - -.PHONY: validate-bundles -validate-bundles: ## Build temporary bundle images and run scorecard tests in Kubernetes -validate-bundles: $(distros:%=validate-%-image) -validate-bundles: $(distros:%=validate-%-directory) - -validate-%-directory: - ./validate-directory.sh 'bundles/$*' - -validate-%-image: - ./validate-image.sh '$(CONTAINER)' 'bundles/$*' diff --git a/installers/olm/README.md b/installers/olm/README.md deleted file mode 100644 index e067c86b39..0000000000 --- a/installers/olm/README.md +++ /dev/null @@ -1,147 +0,0 @@ -This directory contains the files that are used to install [Crunchy PostgreSQL for Kubernetes][hub-listing], -which includes PGO, the Postgres Operator from [Crunchy Data][], using [Operator Lifecycle Manager][OLM]. - -The integration centers around a [ClusterServiceVersion][olm-csv] [manifest](./bundle.csv.yaml) -that gets packaged for OperatorHub. Changes there are accepted only if they pass all the [scorecard][] -tests. Consult the [technical requirements][hub-contrib] when making changes. 
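For a concrete sense of the flow those Makefile targets drove, here is a minimal sketch of generating and validating the community bundle by hand. It mirrors the `bundles/community` recipe above and assumes `PGO_VERSION` and `REPLACES_VERSION` are exported, as the Makefile does, and that the tools from `make tools` are on `PATH`:

```sh
# Generate the community bundle into bundles/community, then run the same
# operator-sdk checks as `make bundles/community` (defaults mirror the Makefile).
export PGO_VERSION='latest' REPLACES_VERSION='5.x.y'
./generate.sh community
operator-sdk bundle validate bundles/community --select-optional='suite=operatorframework'
operator-sdk bundle validate bundles/community \
    --select-optional='name=community' \
    --optional-values='index-path=bundles/community/Dockerfile'
```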
-
-
-[Crunchy Data]: https://www.crunchydata.com
-[hub-contrib]: https://operator-framework.github.io/community-operators/packaging-operator/
-[hub-listing]: https://operatorhub.io/operator/postgresql
-[OLM]: https://github.com/operator-framework/operator-lifecycle-manager
-[olm-csv]: https://github.com/operator-framework/operator-lifecycle-manager/blob/master/doc/design/building-your-csv.md
-[scorecard]: https://sdk.operatorframework.io/docs/testing-operators/scorecard/
-
-[Red Hat Container Certification]: https://redhat-connect.gitbook.io/partner-guide-for-red-hat-openshift-and-container/
-[Red Hat Operator Certification]: https://redhat-connect.gitbook.io/certified-operator-guide/
-
-
-
-## Notes
-
-### v5 Versions per Repository
-
-Community: https://github.com/k8s-operatorhub/community-operators/tree/main/operators/postgresql
-
-5.0.2
-5.0.3
-5.0.4
-5.0.5
-5.1.0
-
-Community Prod: https://github.com/redhat-openshift-ecosystem/community-operators-prod/tree/main/operators/postgresql
-
-5.0.2
-5.0.3
-5.0.4
-5.0.5
-5.1.0
-
-Certified: https://github.com/redhat-openshift-ecosystem/certified-operators/tree/main/operators/crunchy-postgres-operator
-
-5.0.4
-5.0.5
-5.1.0
-
-Marketplace: https://github.com/redhat-openshift-ecosystem/redhat-marketplace-operators/tree/main/operators/crunchy-postgres-operator-rhmp
-
-5.0.4
-5.0.5
-5.1.0
-
-### Issues Encountered
-
-We hit various issues with 5.1.0 where the 'replaces' name, set in the clusterserviceversion.yaml, didn't match the
-expected names found for all indexes. Previously, we set the 'com.redhat.openshift.versions' annotation to "v4.6-v4.9".
-The goal of this setting was to limit the upper bound of supported versions for a particular PGO release.
-The problem was that, at the time of the 5.1.0 release, OCP 4.10 had just been released. This meant that the
-5.0.5 bundle did not exist in the OCP 4.10 index. The solution presented by Red Hat was to use the 'skips' clause for
-the 5.1.0 release to remedy the immediate problem, but then go back to using an unbounded setting for subsequent
-releases.
-
-For the certified, marketplace, and community repositories, this strategy of using 'skips' instead of 'replaces' worked
-as expected. However, for the production community operator bundle, we saw a failure that required adding a
-'replaces' value of 5.0.4 in addition to the 5.0.5 'skips' value. While this allowed the PR to merge, it
-seems at odds with the behavior of the other repositories.
-
-For more information on the use of 'skips' and 'replaces', please see:
-https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/
-
-
-Another version issue was related to our attempt to support OCP v4.6 (an Extended Update
-Support (EUS) release) while also limiting Kubernetes to 1.20+. The issue is that OCP 4.6 runs Kubernetes 1.19,
-so the minKubeVersion validation was in fact limiting the OCP version as well. Our hope was that those settings would
-be treated independently, but that was unfortunately not the case. The fix was to lower the minimum Kubernetes version
-to 1.19, despite its having been released in the third quarter of 2020 with one year of patch support.
-
-Following the lessons learned above, when bumping the supported OpenShift version from v4.6 to v4.8, we will similarly
-keep the matching minimum Kubernetes version, i.e. 1.21.
-https://access.redhat.com/solutions/4870701
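To make the 'skips' versus 'replaces' discussion concrete, here is a sketch of how a hypothetical 5.1.0 ClusterServiceVersion could express both fields. The `postgresoperator.vX.Y.Z` names follow the `"\($stem).v\($version)"` convention used by generate.sh below; the values here are illustrative, not the published ones:

```yaml
# Hypothetical upgrade-graph stanza for a 5.1.0 CSV (illustrative values).
apiVersion: operators.coreos.com/v1alpha1
kind: ClusterServiceVersion
metadata:
  name: postgresoperator.v5.1.0
spec:
  version: 5.1.0
  # Continue the upgrade graph from 5.0.4 ...
  replaces: postgresoperator.v5.0.4
  # ... while allowing upgrades that skip 5.0.5, which was absent from the OCP 4.10 index.
  skips:
    - postgresoperator.v5.0.5
```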
-
-## Testing
-
-### Setup
-
-```sh
-make tools
-```
-
-### Testing
-
-```sh
-make bundles validate-bundles
-```
-
-Previously, the 'validate_bundle_image' function in validate-bundles.sh ended
-with the following command:
-
-```sh
-    # Create an index database from the bundle image.
-    "${opm[@]}" index add --bundles="${image}" --generate
-
-    # drwxr-xr-x. 2 user user     22 database
-    # -rw-r--r--. 1 user user 286720 database/index.db
-    # -rw-r--r--. 1 user user    267 index.Dockerfile
-```
-
-This command was used to generate the updated registry database, but this step
-is no longer required when validating the OLM bundles.
-- https://github.com/operator-framework/operator-registry/blob/master/docs/design/opm-tooling.md#add-1
-
-```sh
-BUNDLE_DIRECTORY='bundles/community'
-BUNDLE_IMAGE='gcr.io/.../postgres-operator-bundle:latest'
-INDEX_IMAGE='gcr.io/.../postgres-operator-bundle-index:latest'
-NAMESPACE='pgo'
-
-docker build --tag "$BUNDLE_IMAGE" "$BUNDLE_DIRECTORY"
-docker push "$BUNDLE_IMAGE"
-
-opm index add --bundles "$BUNDLE_IMAGE" --tag "$INDEX_IMAGE" --container-tool=docker
-docker push "$INDEX_IMAGE"
-
-./install.sh operator "$BUNDLE_DIRECTORY" "$INDEX_IMAGE" "$NAMESPACE" "$NAMESPACE"
-
-# Cleanup
-operator-sdk cleanup postgresql --namespace="$NAMESPACE"
-kubectl -n "$NAMESPACE" delete operatorgroup olm-operator-group
-```
-
-### Post Bundle Generation
-
-After generating and testing the OLM bundles, there are two manual steps:
-
-1. Update the image SHA values (denoted with '', required for both the Red Hat 'Certified' and
-'Marketplace' bundles)
-2. Update the 'description.md' file to indicate which OCP versions this release of PGO was tested against.
-
-### Troubleshooting
-
-If, when running `make validate-bundles`, you encounter an error similar to
-
-`cannot find Containerfile or Dockerfile in context directory: stat /mnt/Dockerfile: permission denied`
-
-the target command is likely being blocked by SELinux and you will need to adjust
-your settings accordingly.
diff --git a/installers/olm/bundle.Dockerfile b/installers/olm/bundle.Dockerfile
deleted file mode 100644
index a81d16f73e..0000000000
--- a/installers/olm/bundle.Dockerfile
+++ /dev/null
@@ -1,18 +0,0 @@
-# Used to build the bundle image. This file is ignored by the community operator
-# registries which work with bundle directories instead.
-# https://operator-framework.github.io/community-operators/packaging-operator/
-
-FROM scratch AS builder
-
-COPY manifests/ /build/manifests/
-COPY metadata/ /build/metadata/
-COPY tests/ /build/tests
-
-
-FROM scratch
-
-# ANNOTATIONS is replaced with bundle.annotations.yaml
-LABEL \
-    ${ANNOTATIONS}
-
-COPY --from=builder /build/ /
diff --git a/installers/olm/bundle.annotations.yaml b/installers/olm/bundle.annotations.yaml
deleted file mode 100644
index 27dce5aa07..0000000000
--- a/installers/olm/bundle.annotations.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-annotations:
-  # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/
-  # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm-packaging-format.html
-  operators.operatorframework.io.bundle.mediatype.v1: registry+v1
-  operators.operatorframework.io.bundle.manifests.v1: manifests/
-  operators.operatorframework.io.bundle.metadata.v1: metadata/
-
-  operators.operatorframework.io.test.mediatype.v1: scorecard+v1
-  operators.operatorframework.io.test.config.v1: tests/scorecard/
-
-  # "package.v1" is the name of the PackageManifest.
It also determines the URL - # of the details page at OperatorHub.io; "postgresql" here becomes: - # https://operatorhub.io/operator/postgresql - # - # A package consists of multiple bundles (versions) arranged into channels. - # https://olm.operatorframework.io/docs/concepts/olm-architecture/operator-catalog/creating-an-update-graph/ - operators.operatorframework.io.bundle.package.v1: '' # generate.sh - - # "channels.v1" is the comma-separated list of channels from which this bundle - # can be installed. - # - # "channel.default.v1" is the default channel of the PackageManifest. It is - # the first channel presented, the first used to satisfy dependencies, and - # the one used by a Subscription that does not specify a channel. OLM uses - # the value from the bundle with the highest semantic version. - # - # https://olm.operatorframework.io/docs/best-practices/channel-naming/ - operators.operatorframework.io.bundle.channels.v1: v5 - operators.operatorframework.io.bundle.channel.default.v1: v5 - - # OpenShift v4.9 is the lowest version supported for v5.3.0+. - # https://github.com/operator-framework/community-operators/blob/8a36a33/docs/packaging-required-criteria-ocp.md - # https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/bundle-directory - com.redhat.delivery.operator.bundle: true - com.redhat.openshift.versions: 'v4.10' - -... diff --git a/installers/olm/bundle.csv.yaml b/installers/olm/bundle.csv.yaml deleted file mode 100644 index 600f8b1bc0..0000000000 --- a/installers/olm/bundle.csv.yaml +++ /dev/null @@ -1,84 +0,0 @@ -# https://olm.operatorframework.io/docs/concepts/crds/clusterserviceversion/ -# https://docs.openshift.com/container-platform/4.7/operators/operator_sdk/osdk-generating-csvs.html -# https://redhat-connect.gitbook.io/certified-operator-guide/ocp-deployment/operator-metadata/creating-the-csv -# https://pkg.go.dev/github.com/operator-framework/api@v0.10.1/pkg/operators/v1alpha1#ClusterServiceVersion - -apiVersion: operators.coreos.com/v1alpha1 -kind: ClusterServiceVersion -metadata: - name: '' # generate.sh - annotations: - support: crunchydata.com - olm.properties: '[]' - - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/?category=Database - # https://sdk.operatorframework.io/docs/advanced-topics/operator-capabilities/operator-capabilities/ - categories: Database - capabilities: Auto Pilot - description: Production Postgres Made Easy - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - createdAt: 2019-12-31 19:40Z - repository: https://github.com/CrunchyData/postgres-operator - containerImage: # kustomize config/operator - alm-examples: |- # kustomize config/examples - -spec: - # The following affect how the package is indexed at OperatorHub.io: - # https://operatorhub.io/ - displayName: Crunchy Postgres for Kubernetes - provider: - # These values become labels on the PackageManifest. 
- name: Crunchy Data - url: https://www.crunchydata.com/ - keywords: - - postgres - - postgresql - - database - - sql - - operator - - crunchy data - - # The following appear on the details page at OperatorHub.io: - # https://operatorhub.io/operator/postgresql - description: |- # description.md - version: '' # generate.sh - links: - - name: Crunchy Data - url: https://www.crunchydata.com/ - - name: Documentation - url: https://access.crunchydata.com/documentation/postgres-operator/v5/ - maintainers: - - name: Crunchy Data - email: info@crunchydata.com - - # https://olm.operatorframework.io/docs/best-practices/common/ - # Note: The minKubeVersion must correspond to the lowest supported OCP version - minKubeVersion: 1.23.0 - maturity: stable - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/how-to-update-operators.md#replaces--channels - replaces: '' # generate.sh - - # https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#your-custom-resource-definitions - customresourcedefinitions: - # The "displayName" and "description" fields appear in the "Custom Resource Definitions" section - # on the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql - # - # The "specDescriptors" and "statusDescriptors" fields appear in the OpenShift Console: - # https://github.com/openshift/console/tree/a8b35e4/frontend/packages/operator-lifecycle-manager/src/components/descriptors - owned: # operator-sdk generate kustomize manifests - - # https://olm.operatorframework.io/docs/advanced-tasks/operator-scoping-with-operatorgroups/ - installModes: - - { type: OwnNamespace, supported: true } - - { type: SingleNamespace, supported: true } - - { type: MultiNamespace, supported: false } - - { type: AllNamespaces, supported: true } - - install: - strategy: deployment - spec: - permissions: # kustomize config/operator - deployments: # kustomize config/operator diff --git a/installers/olm/bundle.relatedImages.yaml b/installers/olm/bundle.relatedImages.yaml deleted file mode 100644 index 3824b27b2e..0000000000 --- a/installers/olm/bundle.relatedImages.yaml +++ /dev/null @@ -1,25 +0,0 @@ - relatedImages: - - name: PGADMIN - image: registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256: - - name: PGBACKREST - image: registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256: - - name: PGBOUNCER - image: registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256: - - name: PGEXPORTER - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256: - - name: PGUPGRADE - image: registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256: - - name: POSTGRES_14 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_15 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256: - - name: POSTGRES_14_GIS_3.1 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.2 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_14_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: POSTGRES_15_GIS_3.3 - image: registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256: - - name: postgres-operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: diff --git a/installers/olm/config/community/kustomization.yaml b/installers/olm/config/community/kustomization.yaml 
deleted file mode 100644 index a34c7b4844..0000000000 --- a/installers/olm/config/community/kustomization.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../operator -- ../examples diff --git a/installers/olm/config/examples/kustomization.yaml b/installers/olm/config/examples/kustomization.yaml deleted file mode 100644 index 420c2644f7..0000000000 --- a/installers/olm/config/examples/kustomization.yaml +++ /dev/null @@ -1,19 +0,0 @@ -# Custom resources that are imported into the ClusterServiceVersion. -# -# The first for each GVK appears in the "Custom Resource Definitions" section on -# the details page at OperatorHub.io: https://operatorhub.io/operator/postgresql -# -# The "metadata.name" fields should be unique so they can be given a description -# that is presented by compatible UIs. -# https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.18.2/doc/design/building-your-csv.md#crd-templates -# -# The "image" fields should be omitted so the defaults are used. -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators - -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- postgrescluster.example.yaml -- pgadmin.example.yaml -- pgupgrade.example.yaml diff --git a/installers/olm/config/examples/pgadmin.example.yaml b/installers/olm/config/examples/pgadmin.example.yaml deleted file mode 100644 index 7ed1d3c03f..0000000000 --- a/installers/olm/config/examples/pgadmin.example.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: example-pgadmin - namespace: openshift-operators -spec: - dataVolumeClaimSpec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 1Gi - serverGroups: - - name: "Crunchy Postgres for Kubernetes" - postgresClusterSelector: {} diff --git a/installers/olm/config/examples/pgupgrade.example.yaml b/installers/olm/config/examples/pgupgrade.example.yaml deleted file mode 100644 index ad4f45310a..0000000000 --- a/installers/olm/config/examples/pgupgrade.example.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGUpgrade -metadata: - name: example-upgrade -spec: - postgresClusterName: example - fromPostgresVersion: 14 - toPostgresVersion: 15 diff --git a/installers/olm/config/examples/postgrescluster.example.yaml b/installers/olm/config/examples/postgrescluster.example.yaml deleted file mode 100644 index 502eaff437..0000000000 --- a/installers/olm/config/examples/postgrescluster.example.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: example -spec: - postgresVersion: 15 - instances: - - replicas: 1 - dataVolumeClaimSpec: - accessModes: [ReadWriteOnce] - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: ["ReadWriteOnce"] - resources: - requests: - storage: 1Gi diff --git a/installers/olm/config/operator/kustomization.yaml b/installers/olm/config/operator/kustomization.yaml deleted file mode 100644 index dfdce41618..0000000000 --- a/installers/olm/config/operator/kustomization.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: -- ../../../../config/default - -patches: -- path: target-namespace.yaml diff --git 
a/installers/olm/config/operator/target-namespace.yaml b/installers/olm/config/operator/target-namespace.yaml deleted file mode 100644 index d7dbaadeef..0000000000 --- a/installers/olm/config/operator/target-namespace.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - # https://docs.openshift.com/container-platform/4.7/operators/understanding/olm/olm-understanding-operatorgroups.html - - name: PGO_TARGET_NAMESPACE - valueFrom: { fieldRef: { fieldPath: "metadata.annotations['olm.targetNamespaces']" } } diff --git a/installers/olm/config/redhat/kustomization.yaml b/installers/olm/config/redhat/kustomization.yaml deleted file mode 100644 index 4d28b460a2..0000000000 --- a/installers/olm/config/redhat/kustomization.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: kustomize.config.k8s.io/v1beta1 -kind: Kustomization - -resources: - - ../operator - - ../examples - -patches: - - path: related-images.yaml - - path: registration.yaml diff --git a/installers/olm/config/redhat/registration.yaml b/installers/olm/config/redhat/registration.yaml deleted file mode 100644 index 8aa8a70ceb..0000000000 --- a/installers/olm/config/redhat/registration.yaml +++ /dev/null @@ -1,43 +0,0 @@ -# Red Hat Marketplace requires that bundles work offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. -# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - env: - - { name: REGISTRATION_REQUIRED, value: "true" } - - { name: TOKEN_PATH, value: "/etc/cpk/cpk_token" } - - name: REGISTRATION_URL - value: "https://access.crunchydata.com/register-cpk" - - name: RSA_KEY - value: |- - -----BEGIN PUBLIC KEY----- - MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA0JWaCc/F+/uV5zJQ7ryN - uzvO+oGgT7z9uXm11qtKae86H3Z3W4qX+gGPs5LrFg444yDRMLqKzPLwuS2yc4mz - QxtVbJyBZijbEDVd/knycj6MxFdBkbjxeGeWYT8nuZf4jBnWB48/O+uUnCbIYt8Q - hUtyJ+KMIXkxrOd4mOgL6dQSCEAIcxBh10ZAucDQIgCn2BrD595uPrvlrrioV/Nq - P0w0qIaKS785YU75qM4rT8tGeWVMEGst4AaRwfV7ZdVe065TP0hjd9sv8iJkr7En - /Zym1NXcKbpwoeT3X9E7cVSARPFhZU1mmtL56wq3QLeFxef9TmVva1/Io0mKn4ah - Uly5jgOpazrXliKJUoOurfMOakkHWfqSd5EfmRTh5nBcNqxtytLdiH0WlCkPSm+Z - Ue3aY91YwcRnFhImLpbQYD5aVLAryzu+IdfRJa+zcZYSK0N8n9irg6jSrQZBct7z - OagHUc0n/ZDP/BO8m0jlpJ7jH+N31Z5qFoNSaxf5H1Y/CwByXtzHJ1k2LleYsr9k - k40nMY4l+SXCe4PmW4zW9uP3ItBWKEI2jFrRJgowQvL0MwtzDhbX9qg4+L9eBFpK - jpHXr2kgLu4srIyXH6JO5UmE/62mHZh0SuqtOT1GQqWde5RjZyidYkwkAHup/AqA - P0TPL/poQ6yvI9a0i22TCpcCAwEAAQ== - -----END PUBLIC KEY----- - volumeMounts: - - mountPath: /etc/cpk - name: cpk-registration-volume - volumes: - - name: cpk-registration-volume - secret: - optional: true - secretName: cpk-registration diff --git a/installers/olm/config/redhat/related-images.yaml b/installers/olm/config/redhat/related-images.yaml deleted file mode 100644 index 7feea0c3f2..0000000000 --- a/installers/olm/config/redhat/related-images.yaml +++ /dev/null @@ -1,78 +0,0 @@ -# Red Hat Marketplace requires that bundles work offline. OSBS will fill out -# the "spec.relatedImages" field of the ClusterServiceVersion if it is blank. 
-# -# https://redhat-connect.gitbook.io/certified-operator-guide/troubleshooting-and-resources/offline-enabled-operators -# https://osbs.readthedocs.io/en/latest/users.html#pinning-pullspecs-for-related-images -apiVersion: apps/v1 -kind: Deployment -metadata: - name: pgo -spec: - template: - spec: - containers: - - name: operator - image: registry.connect.redhat.com/crunchydata/postgres-operator@sha256: - env: - - { - name: RELATED_IMAGE_PGADMIN, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256:", - } - - { - name: RELATED_IMAGE_STANDALONE_PGADMIN, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgadmin4@sha256:", - } - - { - name: RELATED_IMAGE_PGBACKREST, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgbackrest@sha256:", - } - - { - name: RELATED_IMAGE_PGBOUNCER, - value: "registry.connect.redhat.com/crunchydata/crunchy-pgbouncer@sha256:", - } - - { - name: RELATED_IMAGE_PGEXPORTER, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-exporter@sha256:", - } - - { - name: RELATED_IMAGE_PGUPGRADE, - value: "registry.connect.redhat.com/crunchydata/crunchy-upgrade@sha256:", - } - - - { - name: RELATED_IMAGE_POSTGRES_14, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_15, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres@sha256:", - } - - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.1, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.2, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_14_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_15_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16_GIS_3.3, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } - - { - name: RELATED_IMAGE_POSTGRES_16_GIS_3.4, - value: "registry.connect.redhat.com/crunchydata/crunchy-postgres-gis@sha256:", - } diff --git a/installers/olm/description.md b/installers/olm/description.md deleted file mode 100644 index 4528ba5aad..0000000000 --- a/installers/olm/description.md +++ /dev/null @@ -1,75 +0,0 @@ -[Crunchy Postgres for Kubernetes](https://www.crunchydata.com/products/crunchy-postgresql-for-kubernetes), is the leading Kubernetes native -Postgres solution. Built on PGO, the Postgres Operator from Crunchy Data, Crunchy Postgres for Kubernetes gives you a declarative Postgres -solution that automatically manages your PostgreSQL clusters. - -Designed for your GitOps workflows, it is [easy to get started](https://access.crunchydata.com/documentation/postgres-operator/latest/quickstart) -with Crunchy Postgres for Kubernetes. Within a few moments, you can have a production grade Postgres cluster complete with high availability, disaster -recovery, and monitoring, all over secure TLS communications. Even better, Crunchy Postgres for Kubernetes lets you easily customize your Postgres -cluster to tailor it to your workload! 
-
-With conveniences ranging from cloning Postgres clusters to rolling out disruptive changes with minimal downtime via rolling updates, Crunchy Postgres
-for Kubernetes is ready to support your Postgres data at every stage of your release pipeline. Built for resiliency and uptime, Crunchy Postgres
-for Kubernetes will keep your Postgres cluster in its desired state so you do not need to worry about it.
-
-Crunchy Postgres for Kubernetes is developed with many years of production experience in automating Postgres management on Kubernetes, providing
-a seamless cloud native Postgres solution to keep your data always available.
-
-Crunchy Postgres for Kubernetes is made available to users without an active Crunchy Data subscription in connection with Crunchy Data's
-[Developer Program](https://www.crunchydata.com/developers/terms-of-use).
-For more information, please contact us at [info@crunchydata.com](mailto:info@crunchydata.com).
-
-- **PostgreSQL Cluster Provisioning**: [Create, Scale, & Delete PostgreSQL clusters with ease][provisioning],
-  while fully customizing your Pods and PostgreSQL configuration!
-- **High-Availability**: Safe, automated failover backed by a [distributed consensus based high-availability solution][high-availability].
-  Uses [Pod Anti-Affinity][k8s-anti-affinity] to improve resiliency; you can configure how aggressive this should be!
-  Failed primaries automatically heal, allowing for faster recovery times. You can also create regularly scheduled
-  backups and set your backup retention policy.
-- **Disaster Recovery**: [Backups][backups] and [restores][disaster-recovery] leverage the open source [pgBackRest][] utility and
-  [include support for full, incremental, and differential backups as well as efficient delta restores][backups].
-  Set how long you want your backups retained (see the sketch after this list). Works great with very large databases!
-- **Monitoring**: [Track the health of your PostgreSQL clusters][monitoring] using the open source [pgMonitor][] library.
-- **Clone**: [Create new clusters from your existing clusters or backups][clone] with efficient data cloning.
-- **TLS**: All connections are over [TLS][tls]. You can also [bring your own TLS infrastructure][tls] if you do not want to use the provided defaults.
-- **Connection Pooling**: Advanced [connection pooling][pool] support using [pgBouncer][].
-- **Affinity and Tolerations**: Have your PostgreSQL clusters deployed to [Kubernetes Nodes][k8s-nodes] of your preference.
-  Set your [pod anti-affinity][k8s-anti-affinity], node affinity, Pod tolerations, and more rules to customize your deployment topology!
-- **PostgreSQL Major Version Upgrades**: Perform a [PostgreSQL major version upgrade][major-version-upgrade] declaratively.
-- **Database Administration**: Easily deploy [pgAdmin4][pgadmin] to administer your PostgresClusters' databases.
-  The automatic discovery of PostgresClusters ensures that you are able to seamlessly access any databases within your environment from the pgAdmin4 GUI.
-- **Full Customizability**: Crunchy Postgres for Kubernetes makes it easy to get your own PostgreSQL-as-a-Service up and running
-  and fully customize your deployments, including:
-  - Choose the resources for your Postgres cluster: [container resources and storage size][resize-cluster]. [Resize at any time][resize-cluster] with minimal disruption.
-  - Use your own container image repository, including support for `imagePullSecrets` and private repositories
-  - [Customize your PostgreSQL configuration][customize-cluster]
-
-and much more!
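As referenced in the list above, the scheduled backups and retention policy are plain spec fields on a PostgresCluster. The sketch below shows one possible shape; the cluster name, cron expressions, and retention count are illustrative, not recommendations:

```yaml
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo
spec:
  postgresVersion: 16
  instances:
    - replicas: 1
      dataVolumeClaimSpec:
        accessModes: [ReadWriteOnce]
        resources: { requests: { storage: 1Gi } }
  backups:
    pgbackrest:
      global:
        # Keep the two most recent full backups in repo1 (illustrative value).
        repo1-retention-full: "2"
      repos:
        - name: repo1
          schedules:
            # Weekly full backup, daily incrementals (illustrative cron schedules).
            full: "0 1 * * 0"
            incremental: "0 1 * * 1-6"
          volume:
            volumeClaimSpec:
              accessModes: [ReadWriteOnce]
              resources: { requests: { storage: 1Gi } }
```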
- -[backups]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery -[clone]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/backups-disaster-recovery/disaster-recovery -[customize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/day-two/customize-cluster -[disaster-recovery]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/backups-disaster-recovery/disaster-recovery -[high-availability]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/high-availability -[major-version-upgrade]: https://access.crunchydata.com/documentation/postgres-operator/v5/guides/major-postgres-version-upgrade/ -[monitoring]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/day-two/monitoring -[pool]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling -[provisioning]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/create-cluster -[resize-cluster]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/cluster-management/resize-cluster -[tls]: https://access.crunchydata.com/documentation/postgres-operator/latest/tutorials/day-two/customize-cluster#customize-tls - -[k8s-anti-affinity]: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#inter-pod-affinity-and-anti-affinity -[k8s-nodes]: https://kubernetes.io/docs/concepts/architecture/nodes/ - -[pgAdmin]: https://www.pgadmin.org/ -[pgBackRest]: https://www.pgbackrest.org -[pgBouncer]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials/basic-setup/connection-pooling -[pgMonitor]: https://github.com/CrunchyData/pgmonitor - -## Post-Installation - -### Tutorial - -Want to [learn more about the PostgreSQL Operator][tutorial]? Browse through the [tutorial][] to learn more about what you can do, [join the Discord server][discord] for community support, or check out the [PGO GitHub repo][ghrepo] to learn more about the open source Postgres Operator project that powers Crunchy Postgres for Kubernetes. - -[tutorial]: https://access.crunchydata.com/documentation/postgres-operator/v5/tutorials -[discord]: https://discord.gg/a7vWKG8Ec9 -[ghrepo]: https://github.com/CrunchyData/postgres-operator diff --git a/installers/olm/generate.sh b/installers/olm/generate.sh deleted file mode 100755 index 8814bd4c75..0000000000 --- a/installers/olm/generate.sh +++ /dev/null @@ -1,203 +0,0 @@ -#!/usr/bin/env bash -# shellcheck disable=SC2016 -# vim: set noexpandtab : -set -eu - -DISTRIBUTION="$1" - -cd "${BASH_SOURCE[0]%/*}" - -bundle_directory="bundles/${DISTRIBUTION}" -project_directory="projects/${DISTRIBUTION}" -go_api_directory=$(cd ../../pkg/apis && pwd) - -# The 'operators.operatorframework.io.bundle.package.v1' package name for each -# bundle (updated for the 'certified' and 'marketplace' bundles). -package_name='postgresql' - -# The project name used by operator-sdk for initial bundle generation. -project_name='postgresoperator' - -# The prefix for the 'clusterserviceversion.yaml' file. -# Per OLM guidance, the filename for the clusterserviceversion.yaml must be prefixed -# with the Operator's package name for the 'redhat' and 'marketplace' bundles. 
-# https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#get-supported-versions -file_name='postgresoperator' -case "${DISTRIBUTION}" in - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - 'redhat') - file_name='crunchy-postgres-operator' - package_name='crunchy-postgres-operator' - ;; - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - 'marketplace') - file_name='crunchy-postgres-operator-rhmp' - package_name='crunchy-postgres-operator-rhmp' - ;; -esac - -operator_yamls=$(kubectl kustomize "config/${DISTRIBUTION}") -operator_crds=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "CustomResourceDefinition"))') -operator_deployments=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "Deployment"))') -operator_accounts=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ServiceAccount"))') -operator_roles=$(yq <<< "${operator_yamls}" --slurp --yaml-roundtrip 'map(select(.kind == "ClusterRole"))') - -# Recreate the Operator SDK project. -[ ! -d "${project_directory}" ] || rm -r "${project_directory}" -install -d "${project_directory}" -( - cd "${project_directory}" - operator-sdk init --fetch-deps='false' --project-name=${project_name} - rm ./*.go go.* - - # Generate CRD descriptions from Go markers. - # https://sdk.operatorframework.io/docs/building-operators/golang/references/markers/ - crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name - })') - yq --in-place --yaml-roundtrip --argjson resources "${crd_gvks}" \ - '.multigroup = true | .resources = $resources | .' ./PROJECT - - ln -s "${go_api_directory}" . - operator-sdk generate kustomize manifests --interactive='false' -) - -# Recreate the OLM bundle. -[ ! -d "${bundle_directory}" ] || rm -r "${bundle_directory}" -install -d \ - "${bundle_directory}/manifests" \ - "${bundle_directory}/metadata" \ - "${bundle_directory}/tests/scorecard" \ - -# `echo "${operator_yamls}" | operator-sdk generate bundle` includes the ServiceAccount which cannot -# be upgraded: https://github.com/operator-framework/operator-lifecycle-manager/issues/2193 - -# Include Operator SDK scorecard tests. -# https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ -kubectl kustomize "${project_directory}/config/scorecard" \ - > "${bundle_directory}/tests/scorecard/config.yaml" - -# Render bundle annotations and strip comments. -# Per Red Hat we should not include the org.opencontainers annotations in the -# 'redhat' & 'marketplace' annotations.yaml file, so only add them for 'community'. -# - https://coreos.slack.com/team/UP1LZCC1Y -if [ ${DISTRIBUTION} == 'community' ]; then -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | - .annotations["org.opencontainers.image.authors"] = "info@crunchydata.com" | - .annotations["org.opencontainers.image.url"] = "https://crunchydata.com" | - .annotations["org.opencontainers.image.vendor"] = "Crunchy Data" | -.' 
-else -yq --yaml-roundtrip < bundle.annotations.yaml > "${bundle_directory}/metadata/annotations.yaml" \ - --arg package "${package_name}" \ -' - .annotations["operators.operatorframework.io.bundle.package.v1"] = $package | -.' -fi - -# Copy annotations into Dockerfile LABELs. -labels=$(yq --raw-output < "${bundle_directory}/metadata/annotations.yaml" \ - '.annotations | to_entries | map(.key +"="+ (.value | tojson)) | join(" \\\n\t")') -ANNOTATIONS="${labels}" envsubst '$ANNOTATIONS' < bundle.Dockerfile > "${bundle_directory}/Dockerfile" - -# Include CRDs as manifests. -crd_names=$(yq --raw-output <<< "${operator_crds}" 'to_entries[] | [.key, .value.metadata.name] | @tsv') -while IFS=$'\t' read -r index name; do - yq --yaml-roundtrip <<< "${operator_crds}" ".[${index}]" > "${bundle_directory}/manifests/${name}.crd.yaml" -done <<< "${crd_names}" - - -abort() { echo >&2 "$@"; exit 1; } -dump() { yq --color-output; } - -yq > /dev/null <<< "${operator_deployments}" --exit-status 'length == 1' || - abort "too many deployments!" $'\n'"$(dump <<< "${operator_deployments}")" - -yq > /dev/null <<< "${operator_accounts}" --exit-status 'length == 1' || - abort "too many service accounts!" $'\n'"$(dump <<< "${operator_accounts}")" - -yq > /dev/null <<< "${operator_roles}" --exit-status 'length == 1' || - abort "too many roles!" $'\n'"$(dump <<< "${operator_roles}")" - -# Render bundle CSV and strip comments. - -csv_stem=$(yq --raw-output '.projectName' "${project_directory}/PROJECT") - -crd_descriptions=$(yq '.spec.customresourcedefinitions.owned' \ -"${project_directory}/config/manifests/bases/${csv_stem}.clusterserviceversion.yaml") - -crd_gvks=$(yq <<< "${operator_crds}" 'map({ - group: .spec.group, kind: .spec.names.kind, version: .spec.versions[].name -} | { - apiVersion: "\(.group)/\(.version)", kind -})') -crd_examples=$(yq <<< "${operator_yamls}" --slurp --argjson gvks "${crd_gvks}" 'map(select( - IN({ apiVersion, kind }; $gvks | .[]) -))') - -yq --yaml-roundtrip < bundle.csv.yaml > "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" \ - --argjson deployment "$(yq <<< "${operator_deployments}" 'first')" \ - --argjson account "$(yq <<< "${operator_accounts}" 'first | .metadata.name')" \ - --argjson rules "$(yq <<< "${operator_roles}" 'first | .rules')" \ - --argjson crds "${crd_descriptions}" \ - --arg examples "${crd_examples}" \ - --arg version "${PGO_VERSION}" \ - --arg replaces "${REPLACES_VERSION}" \ - --arg description "$(< description.md)" \ - --arg icon "$(base64 ../seal.svg | tr -d '\n')" \ - --arg stem "${csv_stem}" \ -' - .metadata.annotations["alm-examples"] = $examples | - .metadata.annotations["containerImage"] = ($deployment.spec.template.spec.containers[0].image) | - - .metadata.name = "\($stem).v\($version)" | - .spec.version = $version | - .spec.replaces = "\($stem).v\($replaces)" | - - .spec.customresourcedefinitions.owned = $crds | - .spec.description = $description | - .spec.icon = [{ mediatype: "image/svg+xml", base64data: $icon }] | - - .spec.install.spec.permissions = [{ serviceAccountName: $account, rules: $rules }] | - .spec.install.spec.deployments = [( $deployment | { name: .metadata.name, spec } )] | -.' 
- -case "${DISTRIBUTION}" in - 'redhat') - # https://redhat-connect.gitbook.io/certified-operator-guide/appendix/what-if-ive-already-published-a-community-operator - yq --in-place --yaml-roundtrip \ - ' - .metadata.annotations.certified = "true" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; - 'marketplace') - # Annotations needed when targeting Red Hat Marketplace - # https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/ci-pipeline.md#bundle-structure - yq --in-place --yaml-roundtrip \ - --arg package_url "https://marketplace.redhat.com/en-us/operators/${file_name}" \ - ' - .metadata.annotations["containerImage"] = "registry.connect.redhat.com/crunchydata/postgres-operator@sha256:" | - .metadata.annotations["marketplace.openshift.io/remote-workflow"] = - "\($package_url)/pricing?utm_source=openshift_console" | - .metadata.annotations["marketplace.openshift.io/support-workflow"] = - "\($package_url)/support?utm_source=openshift_console" | - .' \ - "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - - # Finally, add related images. NOTE: SHA values will need to be updated - # -https://github.com/redhat-openshift-ecosystem/certification-releases/blob/main/4.9/ga/troubleshooting.md#digest-pinning - cat bundle.relatedImages.yaml >> "${bundle_directory}/manifests/${file_name}.clusterserviceversion.yaml" - ;; -esac - -if > /dev/null command -v tree; then tree -C "${bundle_directory}"; fi diff --git a/installers/olm/install.sh b/installers/olm/install.sh deleted file mode 100755 index 2c4f6ce190..0000000000 --- a/installers/olm/install.sh +++ /dev/null @@ -1,144 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc >/dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -catalog_source() ( - source_namespace="$1" - source_name="$2" - index_image="$3" - - kc() { kubectl --namespace="$source_namespace" "$@"; } - kc get namespace "$source_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$source_namespace" - - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#CatalogSource - source_json=$(jq --null-input \ - --arg name "${source_name}" \ - --arg image "${index_image}" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "CatalogSource", - metadata: { name: $name }, - spec: { - displayName: "Test Registry", - sourceType: "grpc", image: $image - } - }') - kc create --filename=- <<< "$source_json" - - # Wait for Pod to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get pod --selector="olm.catalogSource=${source_name}" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=ready' --timeout='30s' pod --selector="olm.catalogSource=${source_name}"; then - kc logs --previous --tail='-1' --selector="olm.catalogSource=${source_name}" - fi -) - -operator_group() ( - group_namespace="$1" - group_name="$2" - target_namespaces=("${@:3}") - - kc() { kubectl --namespace="$group_namespace" "$@"; } - kc get namespace "$group_namespace" --output=jsonpath='{""}' 2>/dev/null || - kc create namespace "$group_namespace" - - group_json="$( jq <<< '{}' --arg name "$group_name" '{ - apiVersion: "operators.coreos.com/v1", kind: "OperatorGroup", - metadata: { "name": $name }, - spec: { targetNamespaces: [] } - }' )" - - for ns in "${target_namespaces[@]}"; do - group_json="$( jq <<< "$group_json" --arg namespace "$ns" '.spec.targetNamespaces += [ $namespace ]' )" - done - - kc create --filename=- <<< "$group_json" -) - -operator() ( - bundle_directory="$1" index_image="$2" - operator_namespace="$3" - target_namespaces=("${@:4}") - - package_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.package.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - channel_name=$(yq \ - --raw-output '.annotations["operators.operatorframework.io.bundle.channels.v1"]' \ - "${bundle_directory}"/*/annotations.yaml) - csv_name=$(yq --raw-output '.metadata.name' \ - "${bundle_directory}"/*/*.clusterserviceversion.yaml) - - kc() { kubectl --namespace="$operator_namespace" "$@"; } - - catalog_source "$operator_namespace" olm-catalog-source "${index_image}" - operator_group "$operator_namespace" olm-operator-group "${target_namespaces[@]}" - - # Create a Subscription to install the operator. - # See https://godoc.org/github.com/operator-framework/api/pkg/operators/v1alpha1#Subscription - subscription_json=$(jq --null-input \ - --arg channel "$channel_name" \ - --arg namespace "$operator_namespace" \ - --arg package "$package_name" \ - --arg version "$csv_name" \ - '{ - apiVersion: "operators.coreos.com/v1alpha1", kind: "Subscription", - metadata: { name: $package }, - spec: { - name: $package, - sourceNamespace: $namespace, - source: "olm-catalog-source", - startingCSV: $version, - channel: $channel - } - }') - kc create --filename=- <<< "$subscription_json" - - # Wait for the InstallPlan to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get installplan --output=jsonpath="{.items}" )" ] && - break || sleep 1s - done - if ! kc wait --for='condition=installed' --timeout='30s' installplan --all; then - subscription_uid="$( kc get subscription "$package_name" --output=jsonpath='{.metadata.uid}' )" - installplan_json="$( kc get installplan --output=json )" - - jq <<< "$installplan_json" --arg uid "$subscription_uid" \ - '.items[] | select(.metadata.ownerReferences[] | select(.uid == $uid)).status.conditions' - exit 1 - fi - - # Wait for Deployment to exist and be healthy. - for _ in $(seq 10); do - [ '[]' != "$( kc get deploy --selector="olm.owner=$csv_name" --output=jsonpath='{.items}' )" ] && - break || sleep 1s - done - if ! 
kc wait --for='condition=available' --timeout='30s' deploy --selector="olm.owner=$csv_name"; then - kc describe pod --selector="olm.owner=$csv_name" - - crashed_containers="$( kc get pod --selector="olm.owner=$csv_name" --output=json )" - crashed_containers="$( jq <<< "$crashed_containers" --raw-output \ - '.items[] | { - pod: .metadata.name, - container: .status.containerStatuses[] | select(.restartCount > 0).name - } | [.pod, .container] | @tsv' )" - - test -z "$crashed_containers" || while IFS=$'\t' read -r pod container; do - echo; echo "$pod/$container" restarted: - kc logs --container="$container" --previous --tail='-1' "pod/$pod" - done <<< "$crashed_containers" - - exit 1 - fi -) - -"$@" diff --git a/installers/olm/validate-directory.sh b/installers/olm/validate-directory.sh deleted file mode 100755 index 726f64946e..0000000000 --- a/installers/olm/validate-directory.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -if command -v oc > /dev/null; then - kubectl() { oc "$@"; } - kubectl version -else - kubectl version --short -fi - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -validate_bundle_directory() { - local directory="$1" - local namespace - - namespace=$(kubectl create --filename=- --output='go-template={{.metadata.name}}' <<< '{ - "apiVersion": "v1", "kind": "Namespace", - "metadata": { - "generateName": "olm-test-", - "labels": { "olm-test": "bundle-directory" } - } - }') - echo 'namespace "'"${namespace}"'" created' - push_trap_exit "kubectl delete namespace '${namespace}'" - - # https://olm.operatorframework.io/docs/best-practices/common/ - # https://sdk.operatorframework.io/docs/advanced-topics/scorecard/scorecard/ - operator-sdk scorecard --namespace="${namespace}" "${directory}" -} - -validate_bundle_directory "$@" diff --git a/installers/olm/validate-image.sh b/installers/olm/validate-image.sh deleted file mode 100755 index 9d9adef6cf..0000000000 --- a/installers/olm/validate-image.sh +++ /dev/null @@ -1,91 +0,0 @@ -#!/usr/bin/env bash -# vim: set noexpandtab : -set -eu - -push_trap_exit() { - local -a array - eval "array=($(trap -p EXIT))" - # shellcheck disable=SC2064 - trap "$1;${array[2]-}" EXIT -} - -# Store anything in a single temporary directory that gets cleaned up. -TMPDIR=$(mktemp -d) -push_trap_exit "rm -rf '${TMPDIR}'" -export TMPDIR - -validate_bundle_image() { - local container="$1" directory="$2" - directory=$(cd "${directory}" && pwd) - - cat > "${TMPDIR}/registry.config" <<-SSL - [req] - distinguished_name = req_distinguished_name - x509_extensions = v3_ext - prompt = no - [req_distinguished_name] - commonName = localhost - [v3_ext] - subjectAltName = @alt_names - [alt_names] - DNS.1 = localhost - SSL - - openssl ecparam -name prime256v1 -genkey -out "${TMPDIR}/registry.key" - openssl req -new -x509 -days 1 \ - -config "${TMPDIR}/registry.config" \ - -key "${TMPDIR}/registry.key" \ - -out "${TMPDIR}/registry.crt" - - # Start a local image registry. 
- local image port registry - registry=$(${container} run --detach --publish-all \ - --env='REGISTRY_HTTP_TLS_CERTIFICATE=/mnt/registry.crt' \ - --env='REGISTRY_HTTP_TLS_KEY=/mnt/registry.key' \ - --volume="${TMPDIR}:/mnt" \ - docker.io/library/registry:latest) - # https://github.com/containers/podman/issues/8524 - push_trap_exit "echo -n 'Removing '; ${container} rm '${registry}'" - push_trap_exit "echo -n 'Stopping '; ${container} stop '${registry}'" - - port=$(${container} inspect "${registry}" \ - --format='{{ (index .NetworkSettings.Ports "5000/tcp" 0).HostPort }}') - image="localhost:${port}/postgres-operator-bundle:latest" - - cat > "${TMPDIR}/registries.conf" <<-TOML - [[registry]] - location = "localhost:${port}" - insecure = true - TOML - - # Build the bundle image and push it to the local registry. - ${container} run --rm \ - --device='/dev/fuse:rw' --network='host' --security-opt='seccomp=unconfined' \ - --volume="${TMPDIR}/registries.conf:/etc/containers/registries.conf.d/localhost.conf:ro" \ - --volume="${directory}:/mnt:delegated" \ - --workdir='/mnt' \ - quay.io/buildah/stable:latest \ - buildah build-using-dockerfile \ - --format='docker' --layers --tag="docker://${image}" - - local -a opm - local opm_version - opm_version=$(opm version) - opm_version=$(sed -n 's#.*OpmVersion:"\([^"]*\)".*#\1# p' <<< "${opm_version}") - # shellcheck disable=SC2206 - opm=(${container} run --rm - --network='host' - --volume="${TMPDIR}/registry.crt:/usr/local/share/ca-certificates/registry.crt:ro" - --volume="${TMPDIR}:/mnt:delegated" - --workdir='/mnt' - quay.io/operator-framework/upstream-opm-builder:"${opm_version}" - sh -ceu 'update-ca-certificates && exec "$@"' - opm) - - # Validate the bundle image in the local registry. - # https://olm.operatorframework.io/docs/tasks/creating-operator-bundle/#validating-your-bundle - "${opm[@]}" alpha bundle validate --image-builder='none' \ - --optional-validators='operatorhub,bundle-objects' \ - --tag="${image}" -} - -validate_bundle_image "$@" diff --git a/installers/seal.svg b/installers/seal.svg deleted file mode 100644 index 28e875f48f..0000000000 --- a/installers/seal.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/internal/bridge/client.go b/internal/bridge/client.go index d5ad8470f7..3e3c4c3b4c 100644 --- a/internal/bridge/client.go +++ b/internal/bridge/client.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -280,7 +280,7 @@ func (c *Client) doWithBackoff( request.Header = headers.Clone() //nolint:bodyclose // This response is returned to the caller. - response, err = c.Client.Do(request) + response, err = c.Do(request) } // An error indicates there was no response from the server, and the @@ -327,7 +327,7 @@ func (c *Client) doWithRetry( // Retry the request when the server responds with "Too many requests". // - https://docs.crunchybridge.com/api-concepts/getting-started/#status-codes // - https://docs.crunchybridge.com/api-concepts/getting-started/#rate-limiting - for err == nil && response.StatusCode == 429 { + for err == nil && response.StatusCode == http.StatusTooManyRequests { seconds, _ := strconv.Atoi(response.Header.Get("Retry-After")) // Only retry when the response indicates how long to wait. 
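The doWithRetry hunk above honors the Bridge API's rate limiting: a 429 Too Many Requests response is retried only when the server's Retry-After header says how long to wait. A self-contained sketch of that shape follows, assuming a plain net/http client; it re-sends the same request object, whereas the surrounding client code rebuilds the request (and clones headers) before each attempt, which is required whenever the request has a body.

package sketch

import (
	"net/http"
	"strconv"
	"time"
)

// doWithRetry re-sends request while the server answers 429 Too Many Requests
// and names a wait duration. Responses without a usable Retry-After header are
// returned to the caller as-is.
func doWithRetry(client *http.Client, request *http.Request) (*http.Response, error) {
	response, err := client.Do(request)

	for err == nil && response.StatusCode == http.StatusTooManyRequests {
		seconds, _ := strconv.Atoi(response.Header.Get("Retry-After"))

		// Only retry when the response indicates how long to wait.
		if seconds <= 0 {
			break
		}

		response.Body.Close()
		time.Sleep(time.Duration(seconds) * time.Second)
		response, err = client.Do(request)
	}

	return response, err
}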
@@ -378,11 +378,11 @@ func (c *Client) CreateAuthObject(ctx context.Context, authn AuthObject) (AuthOb } // 401, Unauthorized - case response.StatusCode == 401: + case response.StatusCode == http.StatusUnauthorized: err = fmt.Errorf("%w: %s", errAuthentication, body) default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -409,7 +409,7 @@ func (c *Client) CreateInstallation(ctx context.Context) (Installation, error) { } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -445,7 +445,7 @@ func (c *Client) ListClusters(ctx context.Context, apiKey, teamId string) ([]*Cl } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -486,7 +486,7 @@ func (c *Client) CreateCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -524,14 +524,14 @@ func (c *Client) DeleteCluster(ctx context.Context, apiKey, id string) (*Cluster // --https://docs.crunchybridge.com/api-concepts/idempotency#delete-semantics // But also, if we can't find it... // Maybe if no ID we return already deleted? - case response.StatusCode == 410: + case response.StatusCode == http.StatusGone: fallthrough - case response.StatusCode == 404: + case response.StatusCode == http.StatusNotFound: deletedAlready = true err = nil default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -565,7 +565,7 @@ func (c *Client) GetCluster(ctx context.Context, apiKey, id string) (*ClusterApi } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -599,7 +599,7 @@ func (c *Client) GetClusterStatus(ctx context.Context, apiKey, id string) (*Clus } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -633,7 +633,7 @@ func (c *Client) GetClusterUpgrade(ctx context.Context, apiKey, id string) (*Clu } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -674,7 +674,7 @@ func (c *Client) UpgradeCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -709,7 +709,7 @@ func (c *Client) UpgradeClusterHA(ctx context.Context, apiKey, id, action string } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. 
err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -724,10 +724,7 @@ func (c *Client) UpdateCluster( ) (*ClusterApiResource, error) { result := &ClusterApiResource{} - clusterbyte, err := json.Marshal(clusterRequestPayload) - if err != nil { - return result, err - } + clusterbyte, _ := json.Marshal(clusterRequestPayload) response, err := c.doWithRetry(ctx, "PATCH", "/clusters/"+id, nil, clusterbyte, http.Header{ "Accept": []string{"application/json"}, @@ -750,7 +747,7 @@ func (c *Client) UpdateCluster( } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -780,7 +777,7 @@ func (c *Client) GetClusterRole(ctx context.Context, apiKey, clusterId, roleName } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } @@ -810,7 +807,7 @@ func (c *Client) ListClusterRoles(ctx context.Context, apiKey, id string) ([]*Cl } default: - //nolint:goerr113 // This is intentionally dynamic. + //nolint:err113 // This is intentionally dynamic. err = fmt.Errorf("%v: %s", response.Status, body) } } diff --git a/internal/bridge/client_test.go b/internal/bridge/client_test.go index 28728c701c..f1aa1c8ddd 100644 --- a/internal/bridge/client_test.go +++ b/internal/bridge/client_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -31,8 +31,8 @@ func TestClientBackoff(t *testing.T) { client := NewClient("", "") var total time.Duration - for i := 1; i <= 50 && client.Backoff.Steps > 0; i++ { - step := client.Backoff.Step() + for i := 1; i <= 50 && client.Steps > 0; i++ { + step := client.Step() total += step t.Logf("%02d:%20v%20v", i, step, total) @@ -68,7 +68,7 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with one attempt, i.e. no backoff. client := NewClient(server.URL, "xyz") - client.Backoff.Steps = 1 + client.Steps = 1 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -113,8 +113,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with brief backoff. client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 5 + client.Duration = time.Millisecond + client.Steps = 5 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -170,8 +170,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with brief backoff. client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 5 + client.Duration = time.Millisecond + client.Steps = 5 assert.Equal(t, client.BaseURL.String(), server.URL) ctx := context.Background() @@ -190,8 +190,8 @@ func TestClientDoWithBackoff(t *testing.T) { // Client with lots of brief backoff. 
client := NewClient(server.URL, "") - client.Backoff.Duration = time.Millisecond - client.Backoff.Steps = 100 + client.Duration = time.Millisecond + client.Steps = 100 assert.Equal(t, client.BaseURL.String(), server.URL) ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond) diff --git a/internal/bridge/crunchybridgecluster/apply.go b/internal/bridge/crunchybridgecluster/apply.go index d77d719d6a..6edd870790 100644 --- a/internal/bridge/crunchybridgecluster/apply.go +++ b/internal/bridge/crunchybridgecluster/apply.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -22,7 +22,7 @@ func (r *CrunchyBridgeClusterReconciler) patch( patch client.Patch, options ...client.PatchOption, ) error { options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) + return r.Patch(ctx, object, patch, options...) } // apply sends an apply patch to object's endpoint in the Kubernetes API and diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go index 03d67442be..c83c7a0c39 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -23,7 +23,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/controller/runtime" - pgoRuntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -57,7 +56,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( // Potentially replace with different requeue times, remove the Watch function // Smarter: retry after a certain time for each cluster: https://gist.github.com/cbandy/a5a604e3026630c5b08cfbcdfffd2a13 WatchesRawSource( - pgoRuntime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), + runtime.NewTickerImmediate(5*time.Minute, event.GenericEvent{}, r.Watch()), ). 
// Watch secrets and filter for secrets mentioned by CrunchyBridgeClusters Watches( @@ -80,7 +79,7 @@ func (r *CrunchyBridgeClusterReconciler) SetupWithManager( func (r *CrunchyBridgeClusterReconciler) setControllerReference( owner *v1beta1.CrunchyBridgeCluster, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, r.Scheme()) } //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="crunchybridgeclusters",verbs={get,patch,update} @@ -671,7 +670,7 @@ func (r *CrunchyBridgeClusterReconciler) GetSecretKeys( }} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existing), existing)) + r.Get(ctx, client.ObjectKeyFromObject(existing), existing)) if err == nil { if existing.Data["key"] != nil && existing.Data["team"] != nil { @@ -694,7 +693,7 @@ func (r *CrunchyBridgeClusterReconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Delete(ctx, object, exactly) } return nil diff --git a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go index 92d6b58d0e..a29b418b13 100644 --- a/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go +++ b/internal/bridge/crunchybridgecluster/crunchybridgecluster_controller_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/crunchybridgecluster/delete.go b/internal/bridge/crunchybridgecluster/delete.go index 8dcada31cf..ae44c8036b 100644 --- a/internal/bridge/crunchybridgecluster/delete.go +++ b/internal/bridge/crunchybridgecluster/delete.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -28,7 +28,7 @@ func (r *CrunchyBridgeClusterReconciler) handleDelete( log := ctrl.LoggerFrom(ctx) // If the CrunchyBridgeCluster isn't being deleted, add the finalizer - if crunchybridgecluster.ObjectMeta.DeletionTimestamp.IsZero() { + if crunchybridgecluster.DeletionTimestamp.IsZero() { if !controllerutil.ContainsFinalizer(crunchybridgecluster, finalizer) { controllerutil.AddFinalizer(crunchybridgecluster, finalizer) if err := r.Update(ctx, crunchybridgecluster); err != nil { diff --git a/internal/bridge/crunchybridgecluster/delete_test.go b/internal/bridge/crunchybridgecluster/delete_test.go index 28e6feb1f8..c86746ef1b 100644 --- a/internal/bridge/crunchybridgecluster/delete_test.go +++ b/internal/bridge/crunchybridgecluster/delete_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -65,7 +65,7 @@ func TestHandleDeleteCluster(t *testing.T) { // Get cluster from kubernetes and assert that the deletion timestamp was added assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) // Note: We must run handleDelete multiple times because we don't want to remove the // finalizer until we're sure that the cluster has been deleted from Bridge, so we @@ -107,7 +107,7 @@ func TestHandleDeleteCluster(t *testing.T) { // Get cluster from kubernetes and assert that the deletion timestamp was added assert.NilError(t, tClient.Get(ctx, client.ObjectKeyFromObject(cluster), cluster)) - assert.Check(t, !cluster.ObjectMeta.DeletionTimestamp.IsZero()) + assert.Check(t, !cluster.DeletionTimestamp.IsZero()) // Run handleDelete again to attempt to delete from Bridge, but provide bad api key cluster.Status.ID = "2345" diff --git a/internal/bridge/crunchybridgecluster/helpers_test.go b/internal/bridge/crunchybridgecluster/helpers_test.go index f40ad3d054..f8bc4295f6 100644 --- a/internal/bridge/crunchybridgecluster/helpers_test.go +++ b/internal/bridge/crunchybridgecluster/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/crunchybridgecluster/mock_bridge_api.go b/internal/bridge/crunchybridgecluster/mock_bridge_api.go index 5c6b243714..f0439531d1 100644 --- a/internal/bridge/crunchybridgecluster/mock_bridge_api.go +++ b/internal/bridge/crunchybridgecluster/mock_bridge_api.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -13,7 +13,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/initialize" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) diff --git a/internal/bridge/crunchybridgecluster/postgres.go b/internal/bridge/crunchybridgecluster/postgres.go index 024631de67..80096de91b 100644 --- a/internal/bridge/crunchybridgecluster/postgres.go +++ b/internal/bridge/crunchybridgecluster/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -11,7 +11,6 @@ import ( "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" @@ -93,7 +92,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( // Make sure that this cluster's role secret names are not being used by any other // secrets in the namespace allSecretsInNamespace := &corev1.SecretList{} - err := errors.WithStack(r.Client.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) + err := errors.WithStack(r.List(ctx, allSecretsInNamespace, client.InNamespace(cluster.Namespace))) if err != nil { return nil, nil, err } @@ -116,7 +115,7 @@ func (r *CrunchyBridgeClusterReconciler) reconcilePostgresRoleSecrets( selector, err := naming.AsSelector(naming.CrunchyBridgeClusterPostgresRoles(cluster.Name)) if err == nil { err = errors.WithStack( - r.Client.List(ctx, secrets, + r.List(ctx, secrets, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector}, )) diff --git a/internal/bridge/crunchybridgecluster/postgres_test.go b/internal/bridge/crunchybridgecluster/postgres_test.go index 66add7b789..6fae4fe26a 100644 --- a/internal/bridge/crunchybridgecluster/postgres_test.go +++ b/internal/bridge/crunchybridgecluster/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -8,12 +8,11 @@ import ( "context" "testing" - "sigs.k8s.io/controller-runtime/pkg/client" - "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/crunchydata/postgres-operator/internal/bridge" "github.com/crunchydata/postgres-operator/internal/testing/require" diff --git a/internal/bridge/crunchybridgecluster/watches.go b/internal/bridge/crunchybridgecluster/watches.go index 79687b3476..515b3dbf52 100644 --- a/internal/bridge/crunchybridgecluster/watches.go +++ b/internal/bridge/crunchybridgecluster/watches.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/crunchybridgecluster/watches_test.go b/internal/bridge/crunchybridgecluster/watches_test.go index 48dba2ba14..7ac0e26e57 100644 --- a/internal/bridge/crunchybridgecluster/watches_test.go +++ b/internal/bridge/crunchybridgecluster/watches_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/installation.go b/internal/bridge/installation.go index c76a073348..3464c6f020 100644 --- a/internal/bridge/installation.go +++ b/internal/bridge/installation.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/installation_test.go b/internal/bridge/installation_test.go index 96223a2233..7f35781820 100644 --- a/internal/bridge/installation_test.go +++ b/internal/bridge/installation_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -99,7 +99,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -155,7 +155,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -289,7 +289,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -343,7 +343,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } @@ -426,7 +426,7 @@ func TestInstallationReconcile(t *testing.T) { reconciler.NewClient = func() *Client { c := NewClient(server.URL, "") - c.Backoff.Steps = 1 + c.Steps = 1 assert.Equal(t, c.BaseURL.String(), server.URL) return c } diff --git a/internal/bridge/naming.go b/internal/bridge/naming.go index cabe8e9cf6..7b8b6a9223 100644 --- a/internal/bridge/naming.go +++ b/internal/bridge/naming.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/quantity.go b/internal/bridge/quantity.go index a948c6b4cf..e4edd2a149 100644 --- a/internal/bridge/quantity.go +++ b/internal/bridge/quantity.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/bridge/quantity_test.go b/internal/bridge/quantity_test.go index 7cfebb4a86..d0e914a9d1 100644 --- a/internal/bridge/quantity_test.go +++ b/internal/bridge/quantity_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/config/config.go b/internal/config/config.go index e3f9ced215..5186f1626a 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -152,7 +152,7 @@ func VerifyImageValues(cluster *v1beta1.PostgresCluster) error { } if len(images) > 0 { - return fmt.Errorf("Missing image(s): %s", images) + return fmt.Errorf("missing image(s): %s", images) } return nil diff --git a/internal/config/config_test.go b/internal/config/config_test.go index 7b8ca2f863..de308544f4 100644 --- a/internal/config/config_test.go +++ b/internal/config/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/pgupgrade/apply.go b/internal/controller/pgupgrade/apply.go index 71cf65cd4f..fb0c55950e 100644 --- a/internal/controller/pgupgrade/apply.go +++ b/internal/controller/pgupgrade/apply.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/pgupgrade/jobs.go b/internal/controller/pgupgrade/jobs.go index a1722dfc12..30a2334a18 100644 --- a/internal/controller/pgupgrade/jobs.go +++ b/internal/controller/pgupgrade/jobs.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -12,9 +12,11 @@ import ( appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -32,9 +34,9 @@ func pgUpgradeJob(upgrade *v1beta1.PGUpgrade) metav1.ObjectMeta { // upgradeCommand returns an entrypoint that prepares the filesystem for // and performs a PostgreSQL major version upgrade using pg_upgrade. -func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string { - oldVersion := fmt.Sprint(upgrade.Spec.FromPostgresVersion) - newVersion := fmt.Sprint(upgrade.Spec.ToPostgresVersion) +func upgradeCommand(oldVersion, newVersion int, fetchKeyCommand string, availableCPUs int) []string { + // Use multiple CPUs when three or more are available. + argJobs := fmt.Sprintf(` --jobs=%d`, max(1, availableCPUs-1)) // if the fetch key command is set for TDE, provide the value during initialization initdb := `/usr/pgsql-"${new_version}"/bin/initdb -k -D /pgdata/pg"${new_version}"` @@ -42,7 +44,7 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string initdb += ` --encryption-key-command "` + fetchKeyCommand + `"` } - args := []string{oldVersion, newVersion} + args := []string{fmt.Sprint(oldVersion), fmt.Sprint(newVersion)} script := strings.Join([]string{ `declare -r data_volume='/pgdata' old_version="$1" new_version="$2"`, `printf 'Performing PostgreSQL upgrade from version "%s" to "%s" ...\n\n' "$@"`, @@ -96,14 +98,14 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string `echo -e "Step 5: Running pg_upgrade check...\n"`, `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\`, - ` --new-datadir /pgdata/pg"${new_version}" --link --check`, + ` --new-datadir /pgdata/pg"${new_version}" --link --check` + argJobs, // Assuming the check completes successfully, the pg_upgrade command will // be run that actually prepares the upgraded pgdata directory. `echo -e "\nStep 6: Running pg_upgrade...\n"`, `time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \`, `--new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \`, - `--new-datadir /pgdata/pg"${new_version}" --link`, + `--new-datadir /pgdata/pg"${new_version}" --link` + argJobs, // Since we have cleared the Patroni cluster step by removing the EndPoints, we copy patroni.dynamic.json // from the old data dir to help retain PostgreSQL parameters you had set before. @@ -117,10 +119,21 @@ func upgradeCommand(upgrade *v1beta1.PGUpgrade, fetchKeyCommand string) []string return append([]string{"bash", "-ceu", "--", script, "upgrade"}, args...) 
} +// largestWholeCPU returns the maximum CPU request or limit as a non-negative +// integer of CPUs. When resources lacks any CPU, the result is zero. +func largestWholeCPU(resources corev1.ResourceRequirements) int { + // Read CPU quantities as millicores then divide to get the "floor." + // NOTE: [resource.Quantity.Value] looks easier, but it rounds up. + return max( + int(resources.Limits.Cpu().ScaledValue(resource.Milli)/1000), + int(resources.Requests.Cpu().ScaledValue(resource.Milli)/1000), + 0) +} + // generateUpgradeJob returns a Job that can upgrade the PostgreSQL data // directory of the startup instance. func (r *PGUpgradeReconciler) generateUpgradeJob( - _ context.Context, upgrade *v1beta1.PGUpgrade, + ctx context.Context, upgrade *v1beta1.PGUpgrade, startup *appsv1.StatefulSet, fetchKeyCommand string, ) *batchv1.Job { job := &batchv1.Job{} @@ -162,6 +175,12 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( job.Spec.BackoffLimit = initialize.Int32(0) job.Spec.Template.Spec.RestartPolicy = corev1.RestartPolicyNever + // When enabled, calculate the number of CPUs for pg_upgrade. + wholeCPUs := 0 + if feature.Enabled(ctx, feature.PGUpgradeCPUConcurrency) { + wholeCPUs = largestWholeCPU(upgrade.Spec.Resources) + } + // Replace all containers with one that does the upgrade. job.Spec.Template.Spec.EphemeralContainers = nil job.Spec.Template.Spec.InitContainers = nil @@ -174,7 +193,11 @@ func (r *PGUpgradeReconciler) generateUpgradeJob( VolumeMounts: database.VolumeMounts, // Use our upgrade command and the specified image and resources. - Command: upgradeCommand(upgrade, fetchKeyCommand), + Command: upgradeCommand( + upgrade.Spec.FromPostgresVersion, + upgrade.Spec.ToPostgresVersion, + fetchKeyCommand, + wholeCPUs), Image: pgUpgradeContainerImage(upgrade), ImagePullPolicy: upgrade.Spec.ImagePullPolicy, Resources: upgrade.Spec.Resources, @@ -315,7 +338,7 @@ func pgUpgradeContainerImage(upgrade *v1beta1.PGUpgrade) string { // spec is defined. If it is undefined, an error is returned. func verifyUpgradeImageValue(upgrade *v1beta1.PGUpgrade) error { if pgUpgradeContainerImage(upgrade) == "" { - return fmt.Errorf("Missing crunchy-upgrade image") + return fmt.Errorf("missing crunchy-upgrade image") } return nil } diff --git a/internal/controller/pgupgrade/jobs_test.go b/internal/controller/pgupgrade/jobs_test.go index 8dfc4731a2..d625e73ae4 100644 --- a/internal/controller/pgupgrade/jobs_test.go +++ b/internal/controller/pgupgrade/jobs_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -16,11 +16,85 @@ import ( "k8s.io/apimachinery/pkg/api/resource" "sigs.k8s.io/yaml" + "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/testing/cmp" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func TestLargestWholeCPU(t *testing.T) { + assert.Equal(t, 0, + largestWholeCPU(corev1.ResourceRequirements{}), + "expected the zero value to be zero") + + for _, tt := range []struct { + Name, ResourcesYAML string + Result int + }{ + { + Name: "Negatives", ResourcesYAML: `{requests: {cpu: -3}, limits: {cpu: -5}}`, + Result: 0, + }, + { + Name: "SmallPositive", ResourcesYAML: `limits: {cpu: 600m}`, + Result: 0, + }, + { + Name: "FractionalPositive", ResourcesYAML: `requests: {cpu: 2200m}`, + Result: 2, + }, + { + Name: "LargePositive", ResourcesYAML: `limits: {cpu: 10}`, + Result: 10, + }, + { + Name: "RequestsAndLimits", ResourcesYAML: `{requests: {cpu: 2}, limits: {cpu: 4}}`, + Result: 4, + }, + } { + t.Run(tt.Name, func(t *testing.T) { + var resources corev1.ResourceRequirements + assert.NilError(t, yaml.Unmarshal([]byte(tt.ResourcesYAML), &resources)) + assert.Equal(t, tt.Result, largestWholeCPU(resources)) + }) + } +} + +func TestUpgradeCommand(t *testing.T) { + expectScript := func(t *testing.T, script string) { + t.Helper() + + t.Run("PrettyYAML", func(t *testing.T) { + b, err := yaml.Marshal(script) + assert.NilError(t, err) + assert.Assert(t, strings.HasPrefix(string(b), `|`), + "expected literal block scalar, got:\n%s", b) + }) + } + + t.Run("CPUs", func(t *testing.T) { + for _, tt := range []struct { + CPUs int + Jobs string + }{ + {CPUs: 0, Jobs: "--jobs=1"}, + {CPUs: 1, Jobs: "--jobs=1"}, + {CPUs: 2, Jobs: "--jobs=1"}, + {CPUs: 3, Jobs: "--jobs=2"}, + {CPUs: 10, Jobs: "--jobs=9"}, + } { + command := upgradeCommand(10, 11, "", tt.CPUs) + assert.Assert(t, len(command) > 3) + assert.DeepEqual(t, []string{"bash", "-ceu", "--"}, command[:3]) + + script := command[3] + assert.Assert(t, cmp.Contains(script, tt.Jobs)) + + expectScript(t, script) + } + }) +} + func TestGenerateUpgradeJob(t *testing.T) { ctx := context.Background() reconciler := &PGUpgradeReconciler{} @@ -116,11 +190,11 @@ spec: echo -e "Step 5: Running pg_upgrade check...\n" time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}"\ - --new-datadir /pgdata/pg"${new_version}" --link --check + --new-datadir /pgdata/pg"${new_version}" --link --check --jobs=1 echo -e "\nStep 6: Running pg_upgrade...\n" time /usr/pgsql-"${new_version}"/bin/pg_upgrade --old-bindir /usr/pgsql-"${old_version}"/bin \ --new-bindir /usr/pgsql-"${new_version}"/bin --old-datadir /pgdata/pg"${old_version}" \ - --new-datadir /pgdata/pg"${new_version}" --link + --new-datadir /pgdata/pg"${new_version}" --link --jobs=1 echo -e "\nStep 7: Copying patroni.dynamic.json...\n" cp /pgdata/pg"${old_version}"/patroni.dynamic.json /pgdata/pg"${new_version}" echo -e "\npg_upgrade Job Complete!" 
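The expected script above pins --jobs=1 because the PGUpgradeCPUConcurrency gate is off by default, so the whole-CPU count passed to upgradeCommand is zero. With the gate on, jobs.go sizes pg_upgrade's parallelism as max(1, availableCPUs-1), where availableCPUs is the floor of the larger of the CPU request and limit. A small runnable sketch of that calculation, with a made-up resource quantity:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// largestWholeCPU floors the larger of the CPU request and limit, as in jobs.go.
func largestWholeCPU(resources corev1.ResourceRequirements) int {
	// Millicores divided by 1000 truncate toward zero; Quantity.Value rounds up.
	return max(
		int(resources.Limits.Cpu().ScaledValue(resource.Milli)/1000),
		int(resources.Requests.Cpu().ScaledValue(resource.Milli)/1000),
		0)
}

func main() {
	resources := corev1.ResourceRequirements{
		Limits: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2500m")},
	}

	// 2500m floors to 2 whole CPUs, so pg_upgrade runs with --jobs=1,
	// matching the {CPUs: 2, Jobs: "--jobs=1"} case in the test table above.
	fmt.Printf("--jobs=%d\n", max(1, largestWholeCPU(resources)-1))
}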
@@ -145,6 +219,18 @@ spec: status: {} `)) + t.Run(feature.PGUpgradeCPUConcurrency+"Enabled", func(t *testing.T) { + gate := feature.NewGate() + assert.NilError(t, gate.SetFromMap(map[string]bool{ + feature.PGUpgradeCPUConcurrency: true, + })) + ctx := feature.NewContext(context.Background(), gate) + + job := reconciler.generateUpgradeJob(ctx, upgrade, startup, "") + b, _ := yaml.Marshal(job) + assert.Assert(t, strings.Contains(string(b), `--jobs=2`)) + }) + tdeJob := reconciler.generateUpgradeJob(ctx, upgrade, startup, "echo testKey") b, _ := yaml.Marshal(tdeJob) assert.Assert(t, strings.Contains(string(b), diff --git a/internal/controller/pgupgrade/labels.go b/internal/controller/pgupgrade/labels.go index 187fe6bf6f..ac433e2a0b 100644 --- a/internal/controller/pgupgrade/labels.go +++ b/internal/controller/pgupgrade/labels.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/pgupgrade/pgupgrade_controller.go b/internal/controller/pgupgrade/pgupgrade_controller.go index d6d145b793..b4e0002b56 100644 --- a/internal/controller/pgupgrade/pgupgrade_controller.go +++ b/internal/controller/pgupgrade/pgupgrade_controller.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -22,7 +22,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/controller/runtime" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -35,8 +34,7 @@ type PGUpgradeReconciler struct { Client client.Client Owner client.FieldOwner - Recorder record.EventRecorder - Registration registration.Registration + Recorder record.EventRecorder } //+kubebuilder:rbac:groups="batch",resources="jobs",verbs={list,watch} @@ -165,10 +163,6 @@ func (r *PGUpgradeReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return } - if !r.UpgradeAuthorized(upgrade) { - return ctrl.Result{}, nil - } - // Set progressing condition to true if it doesn't exist already setStatusToProgressingIfReasonWas("", upgrade) diff --git a/internal/controller/pgupgrade/registration.go b/internal/controller/pgupgrade/registration.go deleted file mode 100644 index 05d0d80cbd..0000000000 --- a/internal/controller/pgupgrade/registration.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "k8s.io/apimachinery/pkg/api/meta" - - "github.com/crunchydata/postgres-operator/internal/registration" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func (r *PGUpgradeReconciler) UpgradeAuthorized(upgrade *v1beta1.PGUpgrade) bool { - // Allow an upgrade in progress to complete, when the registration requirement is introduced. - // But don't allow new upgrades to be started until a valid token is applied. - progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) != nil - required := r.Registration.Required(r.Recorder, upgrade, &upgrade.Status.Conditions) - - // If a valid token has not been applied, warn the user. 
- if required && !progressing { - registration.SetRequiredWarning(r.Recorder, upgrade, &upgrade.Status.Conditions) - return false - } - - return true -} diff --git a/internal/controller/pgupgrade/registration_test.go b/internal/controller/pgupgrade/registration_test.go deleted file mode 100644 index dc3a4144bc..0000000000 --- a/internal/controller/pgupgrade/registration_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package pgupgrade - -import ( - "testing" - - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/controller/runtime" - "github.com/crunchydata/postgres-operator/internal/registration" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" - "github.com/crunchydata/postgres-operator/internal/testing/events" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestUpgradeAuthorized(t *testing.T) { - t.Run("UpgradeAlreadyInProgress", func(t *testing.T) { - reconciler := new(PGUpgradeReconciler) - upgrade := new(v1beta1.PGUpgrade) - - for _, required := range []bool{false, true} { - reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return required - }) - - meta.SetStatusCondition(&upgrade.Status.Conditions, metav1.Condition{ - Type: ConditionPGUpgradeProgressing, - Status: metav1.ConditionTrue, - }) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, result, "expected signal to proceed") - - progressing := meta.FindStatusCondition(upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - assert.Equal(t, progressing.Status, metav1.ConditionTrue) - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - recorder := events.NewRecorder(t, runtime.Scheme) - upgrade := new(v1beta1.PGUpgrade) - upgrade.Name = "some-upgrade" - - reconciler := PGUpgradeReconciler{ - Recorder: recorder, - Registration: registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return true - }), - } - - meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, !result, "expected signal to not proceed") - - condition := meta.FindStatusCondition(upgrade.Status.Conditions, v1beta1.Registered) - if assert.Check(t, condition != nil) { - assert.Equal(t, condition.Status, metav1.ConditionFalse) - } - - if assert.Check(t, len(recorder.Events) > 0) { - assert.Equal(t, recorder.Events[0].Type, "Warning") - assert.Equal(t, recorder.Events[0].Regarding.Kind, "PGUpgrade") - assert.Equal(t, recorder.Events[0].Regarding.Name, "some-upgrade") - assert.Assert(t, cmp.Contains(recorder.Events[0].Note, "requires")) - } - }) - - t.Run("RegistrationCompleted", func(t *testing.T) { - reconciler := new(PGUpgradeReconciler) - upgrade := new(v1beta1.PGUpgrade) - - called := false - reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - called = true - return false - }) - - meta.RemoveStatusCondition(&upgrade.Status.Conditions, ConditionPGUpgradeProgressing) - - result := reconciler.UpgradeAuthorized(upgrade) - assert.Assert(t, result, "expected signal to proceed") 
- assert.Assert(t, called, "expected registration package to clear conditions") - }) -} diff --git a/internal/controller/pgupgrade/utils.go b/internal/controller/pgupgrade/utils.go index 292107e440..6c92ba5693 100644 --- a/internal/controller/pgupgrade/utils.go +++ b/internal/controller/pgupgrade/utils.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/pgupgrade/world.go b/internal/controller/pgupgrade/world.go index 18d056fe25..c5536e720b 100644 --- a/internal/controller/pgupgrade/world.go +++ b/internal/controller/pgupgrade/world.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/pgupgrade/world_test.go b/internal/controller/pgupgrade/world_test.go index 4aa24f714d..d3e1b307d0 100644 --- a/internal/controller/pgupgrade/world_test.go +++ b/internal/controller/pgupgrade/world_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/apply.go b/internal/controller/postgrescluster/apply.go index 2dae1f7d80..ce3d2fb9e5 100644 --- a/internal/controller/postgrescluster/apply.go +++ b/internal/controller/postgrescluster/apply.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/apply_test.go b/internal/controller/postgrescluster/apply_test.go index c163e8a5ab..d2c77ceb27 100644 --- a/internal/controller/postgrescluster/apply_test.go +++ b/internal/controller/postgrescluster/apply_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -151,6 +151,14 @@ func TestServerSideApply(t *testing.T) { MatchLabels: map[string]string{"select": name}, } sts.Spec.Template.Labels = map[string]string{"select": name} + sts.Spec.Template.Spec = corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + } return &sts } diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 3ba6eab0e8..bd1983237f 100644 --- a/internal/controller/postgrescluster/cluster.go +++ b/internal/controller/postgrescluster/cluster.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index be9e371a56..5e35cc285f 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -8,7 +8,6 @@ import ( "context" "testing" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -91,7 +90,7 @@ func TestCustomLabels(t *testing.T) { ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( @@ -141,8 +140,8 @@ func TestCustomLabels(t *testing.T) { t.Run("Cluster", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "global-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "global-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", Replicas: initialize.Int32(1), @@ -191,8 +190,8 @@ func TestCustomLabels(t *testing.T) { t.Run("Instance", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "instance-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "instance-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", Replicas: initialize.Int32(1), @@ -245,8 +244,8 @@ func TestCustomLabels(t *testing.T) { t.Run("PGBackRest", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbackrest-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbackrest-cluster" + cluster.Namespace = ns.Name cluster.Spec.Backups.PGBackRest.Metadata = &v1beta1.Metadata{ Labels: map[string]string{"my.pgbackrest.label": "lucy"}, } @@ -292,8 +291,8 @@ func TestCustomLabels(t *testing.T) { t.Run("PGBouncer", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbouncer-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbouncer-cluster" + cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Metadata = &v1beta1.Metadata{ Labels: map[string]string{"my.pgbouncer.label": "lucy"}, } @@ -344,7 +343,7 @@ func TestCustomAnnotations(t *testing.T) { ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( @@ -394,8 +393,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("Cluster", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "global-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "global-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "daisy-instance1", Replicas: initialize.Int32(1), @@ -445,8 +444,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("Instance", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "instance-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "instance-cluster" + cluster.Namespace = ns.Name cluster.Spec.InstanceSets = []v1beta1.PostgresInstanceSetSpec{{ Name: "max-instance", Replicas: initialize.Int32(1), @@ -499,8 +498,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("PGBackRest", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbackrest-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbackrest-cluster" + cluster.Namespace = ns.Name cluster.Spec.Backups.PGBackRest.Metadata = &v1beta1.Metadata{ Annotations: map[string]string{"my.pgbackrest.annotation": "lucy"}, } @@ -546,8 +545,8 @@ func TestCustomAnnotations(t *testing.T) { t.Run("PGBouncer", func(t *testing.T) { cluster := testCluster() - cluster.ObjectMeta.Name = "pgbouncer-cluster" - cluster.ObjectMeta.Namespace = ns.Name + cluster.Name = "pgbouncer-cluster" + cluster.Namespace = ns.Name cluster.Spec.Proxy.PGBouncer.Metadata = &v1beta1.Metadata{ Annotations: map[string]string{"my.pgbouncer.annotation": "lucy"}, } @@ -798,12 +797,12 @@ type: ClusterIP assert.NilError(t, err) // Annotations present in the metadata. - assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Annotations, ` + assert.Assert(t, cmp.MarshalMatches(service.Annotations, ` some: note `)) // Labels present in the metadata. - assert.Assert(t, cmp.MarshalMatches(service.ObjectMeta.Labels, ` + assert.Assert(t, cmp.MarshalMatches(service.Labels, ` happy: label postgres-operator.crunchydata.com/cluster: pg2 postgres-operator.crunchydata.com/role: replica diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index d459d30a10..414a90ac15 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -40,7 +40,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/pgmonitor" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -59,9 +58,8 @@ type Reconciler struct { ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error - Recorder record.EventRecorder - Registration registration.Registration - Tracer trace.Tracer + Recorder record.EventRecorder + Tracer trace.Tracer } // +kubebuilder:rbac:groups="",resources="events",verbs={create,patch} @@ -136,6 +134,16 @@ func (r *Reconciler) Reconcile( return runtime.ErrorWithBackoff(err) } } + // Issue Warning Event if postgres version is EOL according to PostgreSQL: + // https://www.postgresql.org/support/versioning/ + currentTime := time.Now() + if postgres.ReleaseIsFinal(cluster.Spec.PostgresVersion, currentTime) { + r.Recorder.Eventf(cluster, corev1.EventTypeWarning, "EndOfLifePostgresVersion", + "The last minor version of Postgres %[1]v has been released."+ + " PG %[1]v will no longer receive updates. We recommend upgrading."+ + " See https://www.postgresql.org/support/versioning", + cluster.Spec.PostgresVersion) + } if cluster.Spec.Standby != nil && cluster.Spec.Standby.Enabled && @@ -185,12 +193,6 @@ func (r *Reconciler) Reconcile( return nil } - if r.Registration != nil && r.Registration.Required(r.Recorder, cluster, &cluster.Status.Conditions) { - registration.SetAdvanceWarning(r.Recorder, cluster, &cluster.Status.Conditions) - } - cluster.Status.RegistrationRequired = nil - cluster.Status.TokenRequired = "" - // if the cluster is paused, set a condition and return if cluster.Spec.Paused != nil && *cluster.Spec.Paused { meta.SetStatusCondition(&cluster.Status.Conditions, metav1.Condition{ diff --git a/internal/controller/postgrescluster/controller_ref_manager.go b/internal/controller/postgrescluster/controller_ref_manager.go index 8c4a34189f..4086fe15dc 100644 --- a/internal/controller/postgrescluster/controller_ref_manager.go +++ b/internal/controller/postgrescluster/controller_ref_manager.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -87,7 +87,7 @@ func (r *Reconciler) claimObject(ctx context.Context, postgresCluster *v1beta1.P // At this point the resource has no controller ref and is therefore an orphan. Ignore if // either the PostgresCluster resource or the orphaned resource is being deleted, or if the selector - // for the orphaned resource doesn't doesn't include the proper PostgresCluster label + // for the orphaned resource doesn't include the proper PostgresCluster label _, hasPGClusterLabel := obj.GetLabels()[naming.LabelCluster] if postgresCluster.GetDeletionTimestamp() != nil || !hasPGClusterLabel { return nil diff --git a/internal/controller/postgrescluster/controller_ref_manager_test.go b/internal/controller/postgrescluster/controller_ref_manager_test.go index 8543fe390d..758daf2ef3 100644 --- a/internal/controller/postgrescluster/controller_ref_manager_test.go +++ b/internal/controller/postgrescluster/controller_ref_manager_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -46,6 +46,14 @@ func TestManageControllerRefs(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: map[string]string{"label1": "val1"}, }, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, } diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index e6fdc5cb86..e2397c24bb 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -13,14 +13,12 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/pkg/errors" - + "github.com/pkg/errors" //nolint:depguard // This legacy test covers so much code, it logs the origin of unexpected errors. "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/rand" "k8s.io/apimachinery/pkg/util/version" @@ -30,7 +28,6 @@ import ( "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/registration" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -89,34 +86,6 @@ func TestDeleteControlled(t *testing.T) { }) } -var olmClusterYAML = ` -metadata: - name: olm -spec: - postgresVersion: 13 - image: postgres - instances: - - name: register-now - dataVolumeClaimSpec: - accessModes: - - "ReadWriteMany" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - image: pgbackrest - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi -` - var _ = Describe("PostgresCluster Reconciler", func() { var test struct { Namespace *corev1.Namespace @@ -137,7 +106,6 @@ var _ = Describe("PostgresCluster Reconciler", func() { test.Reconciler.Client = suite.Client test.Reconciler.Owner = "asdf" test.Reconciler.Recorder = test.Recorder - test.Reconciler.Registration = nil test.Reconciler.Tracer = otel.Tracer("asdf") }) @@ -178,49 +146,6 @@ var _ = Describe("PostgresCluster Reconciler", func() { return result } - Context("Cluster with Registration Requirement, no token", func() { - var cluster *v1beta1.PostgresCluster - - BeforeEach(func() { - test.Reconciler.Registration = registration.RegistrationFunc( - func(record.EventRecorder, client.Object, *[]metav1.Condition) bool { - return true - }) - - cluster = create(olmClusterYAML) - Expect(reconcile(cluster)).To(BeZero()) - }) - - AfterEach(func() { - ctx := context.Background() - - if cluster != nil { - Expect(client.IgnoreNotFound( - suite.Client.Delete(ctx, cluster), - )).To(Succeed()) - - // Remove finalizers, if any, so the namespace can terminate. 
- Expect(client.IgnoreNotFound( - suite.Client.Patch(ctx, cluster, client.RawPatch( - client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))), - )).To(Succeed()) - } - }) - - Specify("Cluster RegistrationRequired Status", func() { - existing := &v1beta1.PostgresCluster{} - Expect(suite.Client.Get( - context.Background(), client.ObjectKeyFromObject(cluster), existing, - )).To(Succeed()) - - Expect(meta.IsStatusConditionFalse(existing.Status.Conditions, v1beta1.Registered)).To(BeTrue()) - - event, ok := <-test.Recorder.Events - Expect(ok).To(BeTrue()) - Expect(event).To(ContainSubstring("Register Soon")) - }) - }) - Context("Cluster", func() { var cluster *v1beta1.PostgresCluster @@ -556,4 +481,66 @@ spec: Expect(instance.Spec.Replicas).To(PointTo(BeEquivalentTo(1))) }) }) + + Context("Postgres version EOL", func() { + var cluster *v1beta1.PostgresCluster + + BeforeEach(func() { + cluster = create(` +metadata: + name: old-postgres +spec: + postgresVersion: 11 + image: postgres + instances: + - name: instance1 + dataVolumeClaimSpec: + accessModes: + - "ReadWriteMany" + resources: + requests: + storage: 1Gi + backups: + pgbackrest: + image: pgbackrest + repos: + - name: repo1 + volume: + volumeClaimSpec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: 1Gi +`) + Expect(reconcile(cluster)).To(BeZero()) + }) + + AfterEach(func() { + ctx := context.Background() + + if cluster != nil { + Expect(client.IgnoreNotFound( + suite.Client.Delete(ctx, cluster), + )).To(Succeed()) + + // Remove finalizers, if any, so the namespace can terminate. + Expect(client.IgnoreNotFound( + suite.Client.Patch(ctx, cluster, client.RawPatch( + client.Merge.Type(), []byte(`{"metadata":{"finalizers":[]}}`))), + )).To(Succeed()) + } + }) + + Specify("Postgres EOL Warning Event", func() { + existing := &v1beta1.PostgresCluster{} + Expect(suite.Client.Get( + context.Background(), client.ObjectKeyFromObject(cluster), existing, + )).To(Succeed()) + + event, ok := <-test.Recorder.Events + Expect(ok).To(BeTrue()) + Expect(event).To(ContainSubstring("PG 11 will no longer receive updates. We recommend upgrading.")) + }) + }) }) diff --git a/internal/controller/postgrescluster/delete.go b/internal/controller/postgrescluster/delete.go index 63fc007f40..a1a4d322dd 100644 --- a/internal/controller/postgrescluster/delete.go +++ b/internal/controller/postgrescluster/delete.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/helpers_test.go b/internal/controller/postgrescluster/helpers_test.go index 0536b466d4..e6709151b4 100644 --- a/internal/controller/postgrescluster/helpers_test.go +++ b/internal/controller/postgrescluster/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 66321cc738..6bc3f9e9ec 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -134,7 +134,8 @@ func (i Instance) IsWritable() (writable, known bool) { // TODO(cbandy): Update this to consider when Patroni is paused. - return strings.HasPrefix(member[role:], `"role":"master"`), true + return strings.HasPrefix(member[role:], `"role":"master"`) || + strings.HasPrefix(member[role:], `"role":"primary"`), true } // PodMatchesPodTemplate returns whether or not the Pod for this instance @@ -335,7 +336,7 @@ func (r *Reconciler) observeInstances( status.DesiredPGDataVolume = make(map[string]string) for _, instance := range observed.bySet[name] { - status.Replicas += int32(len(instance.Pods)) //nolint:gosec + status.Replicas += int32(len(instance.Pods)) if ready, known := instance.IsReady(); known && ready { status.ReadyReplicas++ @@ -753,7 +754,7 @@ func findAvailableInstanceNames(set v1beta1.PostgresInstanceSetSpec, } // Determine whether or not the PVC is associated with an existing instance within the same - // instance set. If not, then the instance name associated with that PVC can be be reused. + // instance set. If not, then the instance name associated with that PVC can be reused. for _, pvc := range setVolumes { pvcInstanceName := pvc.GetLabels()[naming.LabelInstance] instance := observedInstances.byName[pvcInstanceName] @@ -1089,7 +1090,7 @@ func (r *Reconciler) scaleUpInstances( // and append it. for len(instances) < int(*set.Replicas) { var span trace.Span - ctx, span = r.Tracer.Start(ctx, "generateInstanceName") + _, span = r.Tracer.Start(ctx, "generateInstanceName") next := naming.GenerateInstance(cluster, set) // if there are any available instance names (as determined by observing any PVCs for the // instance set that are not currently associated with an instance, e.g. in the event the @@ -1175,7 +1176,7 @@ func (r *Reconciler) reconcileInstance( } if err == nil { instanceCertificates, err = r.reconcileInstanceCertificates( - ctx, cluster, spec, instance, rootCA) + ctx, cluster, spec, instance, rootCA, backupsSpecFound) } if err == nil { postgresDataVolume, err = r.reconcilePostgresDataVolume(ctx, cluster, spec, instance, clusterVolumes, nil) @@ -1419,7 +1420,7 @@ func (r *Reconciler) reconcileInstanceConfigMap( func (r *Reconciler) reconcileInstanceCertificates( ctx context.Context, cluster *v1beta1.PostgresCluster, spec *v1beta1.PostgresInstanceSetSpec, instance *appsv1.StatefulSet, - root *pki.RootCertificateAuthority, + root *pki.RootCertificateAuthority, backupsSpecFound bool, ) (*corev1.Secret, error) { existing := &corev1.Secret{ObjectMeta: naming.InstanceCertificates(instance)} err := errors.WithStack(client.IgnoreNotFound( @@ -1462,7 +1463,7 @@ func (r *Reconciler) reconcileInstanceCertificates( root.Certificate, leafCert.Certificate, leafCert.PrivateKey, instanceCerts) } - if err == nil { + if err == nil && backupsSpecFound { err = pgbackrest.InstanceCertificates(ctx, cluster, root.Certificate, leafCert.Certificate, leafCert.PrivateKey, instanceCerts) diff --git a/internal/controller/postgrescluster/instance.md b/internal/controller/postgrescluster/instance.md index f0de4c5d7a..724219ae22 100644 --- a/internal/controller/postgrescluster/instance.md +++ b/internal/controller/postgrescluster/instance.md @@ -1,5 +1,5 @@ diff --git a/internal/controller/postgrescluster/instance_rollout_test.go b/internal/controller/postgrescluster/instance_rollout_test.go index e668907497..ef08e7147d 100644 --- a/internal/controller/postgrescluster/instance_rollout_test.go +++ 
b/internal/controller/postgrescluster/instance_rollout_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -134,7 +134,7 @@ func TestReconcilerRolloutInstance(t *testing.T) { // A switchover to any viable candidate. assert.DeepEqual(t, command[:2], []string{"patronictl", "switchover"}) - assert.Assert(t, sets.NewString(command...).Has("--master=the-pod")) + assert.Assert(t, sets.NewString(command...).Has("--primary=the-pod")) assert.Assert(t, sets.NewString(command...).Has("--candidate=")) // Indicate success through stdout. diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index f7f59f50a5..7039e9274f 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "os" "sort" @@ -15,7 +16,6 @@ import ( "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -119,7 +119,7 @@ func TestInstanceIsWritable(t *testing.T) { assert.Assert(t, !writable) // Patroni leader - instance.Pods[0].Annotations["status"] = `{"role":"master"}` + instance.Pods[0].Annotations["status"] = `{"role":"primary"}` writable, known = instance.IsWritable() assert.Assert(t, known) assert.Assert(t, writable) @@ -394,7 +394,7 @@ func TestWritablePod(t *testing.T) { Namespace: "namespace", Name: "pod", Annotations: map[string]string{ - "status": `{"role":"master"}`, + "status": `{"role":"primary"}`, }, DeletionTimestamp: &metav1.Time{}, }, @@ -428,7 +428,7 @@ func TestWritablePod(t *testing.T) { Namespace: "namespace", Name: "pod", Annotations: map[string]string{ - "status": `{"role":"master"}`, + "status": `{"role":"primary"}`, }, }, Status: corev1.PodStatus{ @@ -493,7 +493,7 @@ func TestWritablePod(t *testing.T) { Namespace: "namespace", Name: "pod", Annotations: map[string]string{ - "status": `{"role":"master"}`, + "status": `{"role":"primary"}`, }, }, Status: corev1.PodStatus{ @@ -546,151 +546,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { }, } - t.Run("NoVolumeRepo", func(t *testing.T) { - cluster := cluster.DeepCopy() - cluster.Spec.Backups.PGBackRest.Repos = nil - - out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) - - // Only Containers and Volumes fields have changed. - assert.DeepEqual(t, pod, *out, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) - - // Only database container has mounts. - // Other containers are ignored. 
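The master-to-primary edits here track Patroni's rename of its leader role: IsWritable now accepts either spelling in the Pod's status annotation, and patronictl switchover is expected to receive --primary instead of --master. A condensed sketch of the dual-prefix role check, assuming the annotation carries Patroni's member JSON as in the tests:

package main

import (
	"fmt"
	"strings"
)

// isWritableRole mirrors the prefix check in Instance.IsWritable: scan the
// member JSON for a "role" key and treat either leader spelling as writable.
func isWritableRole(member string) bool {
	role := strings.Index(member, `"role":`)
	if role < 0 {
		return false
	}
	return strings.HasPrefix(member[role:], `"role":"master"`) ||
		strings.HasPrefix(member[role:], `"role":"primary"`)
}

func main() {
	fmt.Println(isWritableRole(`{"role":"primary"}`)) // true
	fmt.Println(isWritableRole(`{"role":"master"}`))  // true
	fmt.Println(isWritableRole(`{"role":"replica"}`)) // false
}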
- assert.Assert(t, cmp.MarshalMatches(out.Containers, ` -- name: database - resources: {} - volumeMounts: - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true -- name: other - resources: {} -- command: - - pgbackrest - - server - livenessProbe: - exec: - command: - - pgbackrest - - server-ping - name: pgbackrest - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/pgbackrest/server - name: pgbackrest-server - readOnly: true - - mountPath: /pgdata - name: postgres-data - - mountPath: /pgwal - name: postgres-wal - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true -- command: - - bash - - -ceu - - -- - - |- - monitor() { - exec {fd}<> <(:||:) - until read -r -t 5 -u "${fd}"; do - if - [[ "${filename}" -nt "/proc/self/fd/${fd}" ]] && - pkill -HUP --exact --parent=0 pgbackrest - then - exec {fd}>&- && exec {fd}<> <(:||:) - stat --dereference --format='Loaded configuration dated %y' "${filename}" - elif - { [[ "${directory}" -nt "/proc/self/fd/${fd}" ]] || - [[ "${authority}" -nt "/proc/self/fd/${fd}" ]] - } && - pkill -HUP --exact --parent=0 pgbackrest - then - exec {fd}>&- && exec {fd}<> <(:||:) - stat --format='Loaded certificates dated %y' "${directory}" - fi - done - }; export directory="$1" authority="$2" filename="$3"; export -f monitor; exec -a "$0" bash -ceu monitor - - pgbackrest-config - - /etc/pgbackrest/server - - /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt - - /etc/pgbackrest/conf.d/~postgres-operator_server.conf - name: pgbackrest-config - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - privileged: false - readOnlyRootFilesystem: true - runAsNonRoot: true - seccompProfile: - type: RuntimeDefault - volumeMounts: - - mountPath: /etc/pgbackrest/server - name: pgbackrest-server - readOnly: true - - mountPath: /etc/pgbackrest/conf.d - name: pgbackrest-config - readOnly: true - `)) - - // Instance configuration files with certificates. - // Other volumes are ignored. - assert.Assert(t, cmp.MarshalMatches(out.Volumes, ` -- name: other -- name: postgres-data -- name: postgres-wal -- name: pgbackrest-server - projected: - sources: - - secret: - items: - - key: pgbackrest-server.crt - path: server-tls.crt - - key: pgbackrest-server.key - mode: 384 - path: server-tls.key - name: some-secret -- name: pgbackrest-config - projected: - sources: - - configMap: - items: - - key: pgbackrest_instance.conf - path: pgbackrest_instance.conf - - key: config-hash - path: config-hash - - key: pgbackrest-server.conf - path: ~postgres-operator_server.conf - name: hippo-pgbackrest-config - - secret: - items: - - key: pgbackrest.ca-roots - path: ~postgres-operator/tls-ca.crt - - key: pgbackrest-client.crt - path: ~postgres-operator/client-tls.crt - - key: pgbackrest-client.key - mode: 384 - path: ~postgres-operator/client-tls.key - name: hippo-pgbackrest - `)) - }) - - t.Run("OneVolumeRepo", func(t *testing.T) { + t.Run("CloudOrVolumeSameBehavior", func(t *testing.T) { alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { // Only Containers and Volumes fields have changed. 
assert.DeepEqual(t, pod, *result, cmpopts.IgnoreFields(pod, "Containers", "Volumes")) @@ -735,24 +591,35 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) } - cluster := cluster.DeepCopy() - cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + clusterWithVolume := cluster.DeepCopy() + clusterWithVolume.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ { Name: "repo1", Volume: new(v1beta1.RepoPVC), }, } - out := pod.DeepCopy() - addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) - alwaysExpect(t, out) + clusterWithCloudRepo := cluster.DeepCopy() + clusterWithCloudRepo.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: new(v1beta1.RepoGCS), + }, + } - // The TLS server is added and configuration mounted. - // It has PostgreSQL volumes mounted while other volumes are ignored. - assert.Assert(t, cmp.MarshalMatches(out.Containers, ` + outWithVolume := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, clusterWithVolume, &certificates, outWithVolume) + alwaysExpect(t, outWithVolume) + + outWithCloudRepo := pod.DeepCopy() + addPGBackRestToInstancePodSpec(ctx, clusterWithCloudRepo, &certificates, outWithCloudRepo) + alwaysExpect(t, outWithCloudRepo) + + outContainers := ` - name: database resources: {} volumeMounts: @@ -840,7 +707,12 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { - mountPath: /etc/pgbackrest/conf.d name: pgbackrest-config readOnly: true - `)) + ` + + // The TLS server is added and configuration mounted. + // It has PostgreSQL volumes mounted while other volumes are ignored. + assert.Assert(t, cmp.MarshalMatches(outWithVolume.Containers, outContainers)) + assert.Assert(t, cmp.MarshalMatches(outWithCloudRepo.Containers, outContainers)) t.Run("CustomResources", func(t *testing.T) { cluster := cluster.DeepCopy() @@ -857,7 +729,7 @@ func TestAddPGBackRestToInstancePodSpec(t *testing.T) { }, } - before := out.DeepCopy() + before := outWithVolume.DeepCopy() out := pod.DeepCopy() addPGBackRestToInstancePodSpec(ctx, cluster, &certificates, out) alwaysExpect(t, out) @@ -966,7 +838,7 @@ func TestPodsToKeep(t *testing.T) { checks func(*testing.T, []corev1.Pod) }{ { - name: "RemoveSetWithMasterOnly", + name: "RemoveSetWithPrimaryOnly", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1000,7 +872,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, len(p), 0) }, }, { - name: "KeepMasterOnly", + name: "KeepPrimaryOnly", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1089,7 +961,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, len(p), 0) }, }, { - name: "MasterLastInSet", + name: "PrimaryLastInSet", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1118,7 +990,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, p[0].Labels[naming.LabelRole], "master") }, }, { - name: "ScaleDownSetWithMaster", + name: "ScaleDownSetWithPrimary", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1169,7 +1041,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, p[1].Labels[naming.LabelInstanceSet], "max") }, }, { - name: "ScaleDownSetWithoutMaster", + name: "ScaleDownSetWithoutPrimary", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1222,7 +1094,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, p[2].Labels[naming.LabelRole], "replica") }, }, { - name: "ScaleMasterSetToZero", + name: "ScalePrimarySetToZero", instances: []corev1.Pod{ { 
ObjectMeta: metav1.ObjectMeta{ @@ -1264,7 +1136,7 @@ func TestPodsToKeep(t *testing.T) { assert.Equal(t, p[1].Labels[naming.LabelInstanceSet], "daisy") }, }, { - name: "RemoveMasterInstanceSet", + name: "RemovePrimaryInstanceSet", instances: []corev1.Pod{ { ObjectMeta: metav1.ObjectMeta{ @@ -1347,7 +1219,7 @@ func TestDeleteInstance(t *testing.T) { cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( @@ -1397,9 +1269,9 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, + assert.NilError(t, reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector}))) + client.MatchingLabelsSelector{Selector: selector})) if len(uList.Items) == 0 { return true, nil @@ -1710,7 +1582,6 @@ func TestGenerateInstanceStatefulSetIntent(t *testing.T) { `)) }, }} { - test := test t.Run(test.name, func(t *testing.T) { cluster := test.ip.cluster diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index 1c5ac93eed..855f48a356 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -94,7 +94,7 @@ func (r *Reconciler) handlePatroniRestarts( return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) }) - return errors.WithStack(exec.RestartPendingMembers(ctx, "master", naming.PatroniScope(cluster))) + return errors.WithStack(exec.RestartPendingMembers(ctx, "primary", naming.PatroniScope(cluster))) } // When the primary does not need to restart but a replica does, restart all diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index b2a457685b..50a762977d 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" "os" @@ -14,7 +15,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -97,12 +97,12 @@ ownerReferences: assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. 
- assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "pg2", "postgres-operator.crunchydata.com/patroni": "pg2-ha", @@ -125,13 +125,13 @@ ownerReferences: assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "pg2", @@ -472,15 +472,15 @@ func TestReconcilePatroniStatus(t *testing.T) { ObjectMeta: naming.PatroniDistributedConfiguration(postgresCluster), } if writeAnnotation { - endpoints.ObjectMeta.Annotations = make(map[string]string) - endpoints.ObjectMeta.Annotations["initialize"] = systemIdentifier + endpoints.Annotations = make(map[string]string) + endpoints.Annotations["initialize"] = systemIdentifier } assert.NilError(t, tClient.Create(ctx, endpoints, &client.CreateOptions{})) instance := &Instance{ Name: instanceName, Runner: runner, } - for i := 0; i < readyReplicas; i++ { + for range readyReplicas { instance.Pods = append(instance.Pods, &corev1.Pod{ Status: corev1.PodStatus{ Conditions: []corev1.PodCondition{{ diff --git a/internal/controller/postgrescluster/pgadmin.go b/internal/controller/postgrescluster/pgadmin.go index c0a936ba1f..28611b8ef6 100644 --- a/internal/controller/postgrescluster/pgadmin.go +++ b/internal/controller/postgrescluster/pgadmin.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 92ec6f42f1..4fca974241 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,11 +6,11 @@ package postgrescluster import ( "context" + "errors" "io" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -104,12 +104,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, configmap.Annotations, map[string]string{ "a": "v5", "b": "v2", "e": "v6", }) // Labels present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, configmap.Labels, map[string]string{ "c": "v7", "d": "v4", "f": "v8", "postgres-operator.crunchydata.com/cluster": "pg1", "postgres-operator.crunchydata.com/role": "pgadmin", @@ -194,12 +194,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. 
- assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "my-cluster", "postgres-operator.crunchydata.com/role": "pgadmin", @@ -225,13 +225,13 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "my-cluster", diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index fdfc709f49..059a2f4ec9 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -40,6 +40,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/internal/util" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -125,9 +126,9 @@ type RepoResources struct { // strategy. func (r *Reconciler) applyRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName string, repoResources *RepoResources, - observedInstances *observedInstances) (*appsv1.StatefulSet, error) { + observedInstances *observedInstances, saName string) (*appsv1.StatefulSet, error) { - repo, err := r.generateRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, observedInstances) + repo, err := r.generateRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, observedInstances, saName) if err != nil { return nil, err } @@ -325,10 +326,12 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, // TODO(tjmoore4): This can be removed once 5.0 is EOL. if owned.GetName() != naming.PGBackRestSSHConfig(postgresCluster).Name && owned.GetName() != naming.PGBackRestSSHSecret(postgresCluster).Name { - // If a dedicated repo host resource and a dedicated repo host is enabled, then - // add to the slice and do not delete. - ownedNoDelete = append(ownedNoDelete, owned) - delete = false + // If it is a dedicated repo host resource and a dedicated repo + // host is enabled, then add to the slice and do not delete. + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } } case hasLabel(naming.LabelPGBackRestRepoVolume): if !backupsSpecFound { @@ -345,18 +348,6 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, delete = false } } - case hasLabel(naming.LabelPGBackRestBackup): - if !backupsSpecFound { - break - } - // If a Job is identified for a repo that no longer exists in the spec then - // delete it. Otherwise add it to the slice and continue. 
- for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { - if repo.Name == owned.GetLabels()[naming.LabelPGBackRestRepo] { - ownedNoDelete = append(ownedNoDelete, owned) - delete = false - } - } case hasLabel(naming.LabelPGBackRestCronJob): if !backupsSpecFound { break @@ -371,6 +362,18 @@ func (r *Reconciler) cleanupRepoResources(ctx context.Context, break } } + case hasLabel(naming.LabelPGBackRestBackup): + if !backupsSpecFound { + break + } + // If a Job is identified for a repo that no longer exists in the spec then + // delete it. Otherwise add it to the slice and continue. + for _, repo := range postgresCluster.Spec.Backups.PGBackRest.Repos { + if repo.Name == owned.GetLabels()[naming.LabelPGBackRestRepo] { + ownedNoDelete = append(ownedNoDelete, owned) + delete = false + } + } case hasLabel(naming.LabelPGBackRestRestore): if !backupsSpecFound { break @@ -549,8 +552,9 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, for _, job := range jobList.Items { // we only care about the scheduled backup Jobs created by the // associated CronJobs - sbs := v1beta1.PGBackRestScheduledBackupStatus{} if job.GetLabels()[naming.LabelPGBackRestCronJob] != "" { + sbs := v1beta1.PGBackRestScheduledBackupStatus{} + if len(job.OwnerReferences) > 0 { sbs.CronJobName = job.OwnerReferences[0].Name } @@ -577,7 +581,7 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, // as needed to create and reconcile a pgBackRest dedicated repository host within the kubernetes // cluster. func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, - repoHostName string, repoResources *RepoResources, observedInstances *observedInstances, + repoHostName string, repoResources *RepoResources, observedInstances *observedInstances, saName string, ) (*appsv1.StatefulSet, error) { annotations := naming.Merge( @@ -687,19 +691,20 @@ func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster repo.Spec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) + repo.Spec.Template.Spec.ServiceAccountName = saName + pgbackrest.AddServerToRepoPod(ctx, postgresCluster, &repo.Spec.Template.Spec) - if pgbackrest.RepoHostVolumeDefined(postgresCluster) { - // add the init container to make the pgBackRest repo volume log directory - pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) + // add the init container to make the pgBackRest repo volume log directory + pgbackrest.MakePGBackrestLogDir(&repo.Spec.Template, postgresCluster) - // add pgBackRest repo volumes to pod - if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, - getRepoPVCNames(postgresCluster, repoResources.pvcs), - naming.PGBackRestRepoContainerName); err != nil { - return nil, errors.WithStack(err) - } + // add pgBackRest repo volumes to pod + if err := pgbackrest.AddRepoVolumesToPod(postgresCluster, &repo.Spec.Template, + getRepoPVCNames(postgresCluster, repoResources.pvcs), + naming.PGBackRestRepoContainerName); err != nil { + return nil, errors.WithStack(err) } + // add configs to pod pgbackrest.AddConfigToRepoPod(postgresCluster, &repo.Spec.Template.Spec) @@ -768,7 +773,7 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC } // generateBackupJobSpecIntent generates a JobSpec for a pgBackRest backup job -func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, +func (r *Reconciler) generateBackupJobSpecIntent(ctx 
context.Context, postgresCluster *v1beta1.PostgresCluster, repo v1beta1.PGBackRestRepo, serviceAccountName string, labels, annotations map[string]string, opts ...string) *batchv1.JobSpec { @@ -785,19 +790,29 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P cmdOpts = append(cmdOpts, opts...) container := corev1.Container{ - Command: []string{"/opt/crunchy/bin/pgbackrest"}, - Env: []corev1.EnvVar{ + Image: config.PGBackRestContainerImage(postgresCluster), + ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, + Name: naming.PGBackRestRepoContainerName, + SecurityContext: initialize.RestrictedSecurityContext(), + } + + // If the repo that we are backing up to is a local volume, we will configure + // the job to use the pgbackrest go binary to exec into the repo host and run + // the backup. If the repo is a cloud-based repo, we will run the pgbackrest + // backup command directly in the job pod. + if repo.Volume != nil { + container.Command = []string{"/opt/crunchy/bin/pgbackrest"} + container.Env = []corev1.EnvVar{ {Name: "COMMAND", Value: "backup"}, {Name: "COMMAND_OPTS", Value: strings.Join(cmdOpts, " ")}, {Name: "COMPARE_HASH", Value: "true"}, {Name: "CONTAINER", Value: naming.PGBackRestRepoContainerName}, {Name: "NAMESPACE", Value: postgresCluster.GetNamespace()}, {Name: "SELECTOR", Value: naming.PGBackRestDedicatedSelector(postgresCluster.GetName()).String()}, - }, - Image: config.PGBackRestContainerImage(postgresCluster), - ImagePullPolicy: postgresCluster.Spec.ImagePullPolicy, - Name: naming.PGBackRestRepoContainerName, - SecurityContext: initialize.RestrictedSecurityContext(), + } + } else { + container.Command = []string{"/bin/pgbackrest", "backup"} + container.Command = append(container.Command, cmdOpts...) } if postgresCluster.Spec.Backups.PGBackRest.Jobs != nil { @@ -845,7 +860,35 @@ func generateBackupJobSpecIntent(ctx context.Context, postgresCluster *v1beta1.P jobSpec.Template.Spec.ImagePullSecrets = postgresCluster.Spec.ImagePullSecrets // add pgBackRest configs to template - pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) + if repo.Volume != nil { + pgbackrest.AddConfigToRepoPod(postgresCluster, &jobSpec.Template.Spec) + } else { + // If we are doing a cloud repo backup, we need to give pgbackrest proper permissions + // to read certificate files + jobSpec.Template.Spec.SecurityContext = postgres.PodSecurityContext(postgresCluster) + pgbackrest.AddConfigToCloudBackupJob(postgresCluster, &jobSpec.Template) + + // If the user has specified a PVC to use as a log volume via the PGBackRestCloudLogVolume + // annotation, check for the PVC. If we find it, mount it to the backup job. + // Otherwise, create a warning event. 
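The branch above decides where the backup runs. For a volume repo the job keeps the exec helper, /opt/crunchy/bin/pgbackrest, which reads COMMAND and COMMAND_OPTS from the environment and execs into the dedicated repo host; for a cloud repo the job runs pgbackrest backup directly in its own pod. A hypothetical condensation of that split, where hasVolume stands in for repo.Volume != nil:

package main

import (
	"fmt"
	"strings"
)

// backupInvocation sketches the command/env split in generateBackupJobSpecIntent.
func backupInvocation(hasVolume bool, opts []string) (command []string, env map[string]string) {
	if hasVolume {
		// Exec path: the helper binary reads COMMAND and COMMAND_OPTS and
		// runs the backup on the repo host container.
		return []string{"/opt/crunchy/bin/pgbackrest"}, map[string]string{
			"COMMAND":      "backup",
			"COMMAND_OPTS": strings.Join(opts, " "),
		}
	}
	// Cloud path: run pgbackrest directly in the job pod.
	return append([]string{"/bin/pgbackrest", "backup"}, opts...), nil
}

func main() {
	cmd, env := backupInvocation(false, []string{"--stanza=db", "--repo=1"})
	fmt.Println(cmd, env) // [/bin/pgbackrest backup --stanza=db --repo=1] map[]
}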
+ if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { + logVolume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: logVolumeName, + Namespace: postgresCluster.GetNamespace(), + }, + } + err := errors.WithStack(r.Client.Get(ctx, + client.ObjectKeyFromObject(logVolume), logVolume)) + if err != nil { + // PVC not retrieved, create warning event + r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, "PGBackRestCloudLogVolumeNotFound", err.Error()) + } else { + // We successfully found the specified PVC, so we will add it to the backup job + util.AddVolumeAndMountsToPod(&jobSpec.Template.Spec, logVolume) + } + } + } return jobSpec } @@ -1150,10 +1193,16 @@ func (r *Reconciler) reconcileRestoreJob(ctx context.Context, "--pg1-path=" + pgdata, "--repo=" + regexRepoIndex.FindString(repoName)}...) + // Look specifically for the "--target" flag, NOT flags that contain + // "--target" (e.g. "--target-timeline") + targetRegex, err := regexp.Compile("--target[ =]") + if err != nil { + return err + } var deltaOptFound, foundTarget bool for _, opt := range opts { switch { - case strings.Contains(opt, "--target"): + case targetRegex.MatchString(opt): foundTarget = true case strings.Contains(opt, "--delta"): deltaOptFound = true @@ -1377,16 +1426,29 @@ func (r *Reconciler) reconcilePGBackRest(ctx context.Context, return result, nil } - var repoHost *appsv1.StatefulSet - var repoHostName string - // reconcile the pgbackrest repository host - repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances) + // reconcile the RBAC required to run the pgBackRest Repo Host + repoHostSA, err := r.reconcileRepoHostRBAC(ctx, postgresCluster) if err != nil { - log.Error(err, "unable to reconcile pgBackRest repo host") + log.Error(err, "unable to reconcile pgBackRest repo host RBAC") result.Requeue = true return result, nil } - repoHostName = repoHost.GetName() + + var repoHost *appsv1.StatefulSet + var repoHostName string + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + // reconcile the pgbackrest repository host + repoHost, err = r.reconcileDedicatedRepoHost(ctx, postgresCluster, repoResources, instances, repoHostSA.GetName()) + if err != nil { + log.Error(err, "unable to reconcile pgBackRest repo host") + result.Requeue = true + return result, nil + } + repoHostName = repoHost.GetName() + } else { + // remove the dedicated repo host status if a dedicated host is not enabled + meta.RemoveStatusCondition(&postgresCluster.Status.Conditions, ConditionRepoHostReady) + } if err := r.reconcilePGBackRestSecret(ctx, postgresCluster, repoHost, rootCA); err != nil { log.Error(err, "unable to reconcile pgBackRest secret") @@ -1984,16 +2046,39 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, return nil } -// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps and Secrets. +// reconcilePGBackRestConfig is responsible for reconciling the pgBackRest ConfigMaps. func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoHostName, configHash, serviceName, serviceNamespace string, instanceNames []string) error { + // If the user has specified a PVC to use as a log volume for cloud backups via the + // PGBackRestCloudLogVolume annotation, check for the PVC. If we find it, set the cloud + // log path. If the user has specified a PVC, but we can't find it, create a warning event. 
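The compiled pattern above, --target[ =], matches only the standalone --target flag followed by a space or equals sign, so restore options such as --target-timeline=1 no longer count as a recovery target and no longer suppress the automatic --target-action=promote. Its behavior on the option strings the tests exercise:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	targetRegex := regexp.MustCompile("--target[ =]")

	fmt.Println(targetRegex.MatchString("--target=some-date"))  // true
	fmt.Println(targetRegex.MatchString("--target some-date"))  // true
	fmt.Println(targetRegex.MatchString("--target-timeline=1")) // false
}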
+ cloudLogPath := "" + if logVolumeName := postgresCluster.Annotations[naming.PGBackRestCloudLogVolume]; logVolumeName != "" { + logVolume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: logVolumeName, + Namespace: postgresCluster.GetNamespace(), + }, + } + err := errors.WithStack(r.Client.Get(ctx, + client.ObjectKeyFromObject(logVolume), logVolume)) + if err != nil { + // PVC not retrieved, create warning event + r.Recorder.Event(postgresCluster, corev1.EventTypeWarning, + "PGBackRestCloudLogVolumeNotFound", err.Error()) + } else { + // We successfully found the specified PVC, so we will set the log path + cloudLogPath = "/volumes/" + logVolumeName + } + } + backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName, - configHash, serviceName, serviceNamespace, instanceNames) - if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, - r.Client.Scheme()); err != nil { + configHash, serviceName, serviceNamespace, cloudLogPath, instanceNames) + + if err := r.setControllerReference(postgresCluster, backrestConfig); err != nil { return err } if err := r.apply(ctx, backrestConfig); err != nil { @@ -2116,12 +2201,39 @@ func (r *Reconciler) reconcilePGBackRestRBAC(ctx context.Context, return sa, nil } +// +kubebuilder:rbac:groups="",resources="serviceaccounts",verbs={create,patch} + +// reconcileRepoHostRBAC reconciles the ServiceAccount for the pgBackRest repo host +func (r *Reconciler) reconcileRepoHostRBAC(ctx context.Context, + postgresCluster *v1beta1.PostgresCluster) (*corev1.ServiceAccount, error) { + + sa := &corev1.ServiceAccount{ObjectMeta: naming.RepoHostRBAC(postgresCluster)} + sa.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ServiceAccount")) + + if err := r.setControllerReference(postgresCluster, sa); err != nil { + return nil, errors.WithStack(err) + } + + sa.Annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), + postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil()) + sa.Labels = naming.Merge(postgresCluster.Spec.Metadata.GetLabelsOrNil(), + postgresCluster.Spec.Backups.PGBackRest.Metadata.GetLabelsOrNil(), + naming.PGBackRestLabels(postgresCluster.GetName())) + + if err := r.apply(ctx, sa); err != nil { + return nil, errors.WithStack(err) + } + + return sa, nil +} + // reconcileDedicatedRepoHost is responsible for reconciling a pgBackRest dedicated repository host // StatefulSet according to a specific PostgresCluster custom resource. 
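Both the cloud backup job and reconcilePGBackRestConfig read the same PGBackRestCloudLogVolume annotation, so the log PVC is named once on the PostgresCluster. A hypothetical sketch of opting in from code inside this repository's module; the PVC name "backup-logs" is illustrative and not taken from the diff:

package main

import (
	"fmt"

	"github.com/crunchydata/postgres-operator/internal/naming"
	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
)

func main() {
	cluster := &v1beta1.PostgresCluster{}
	if cluster.Annotations == nil {
		cluster.Annotations = map[string]string{}
	}
	// "backup-logs" is a hypothetical PVC in the cluster's namespace. When it
	// exists, the operator mounts it and sets the log path to
	// /volumes/backup-logs; when it does not, the operator emits a
	// PGBackRestCloudLogVolumeNotFound warning event instead.
	cluster.Annotations[naming.PGBackRestCloudLogVolume] = "backup-logs"
	fmt.Println(cluster.Annotations[naming.PGBackRestCloudLogVolume])
}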
func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, postgresCluster *v1beta1.PostgresCluster, repoResources *RepoResources, - observedInstances *observedInstances) (*appsv1.StatefulSet, error) { + observedInstances *observedInstances, + saName string) (*appsv1.StatefulSet, error) { log := logging.FromContext(ctx).WithValues("reconcileResource", "repoHost") @@ -2162,7 +2274,7 @@ func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, } repoHostName := repoResources.hosts[0].Name repoHost, err := r.applyRepoHostIntent(ctx, postgresCluster, repoHostName, repoResources, - observedInstances) + observedInstances, saName) if err != nil { log.Error(err, "reconciling repository host") return nil, err @@ -2172,7 +2284,7 @@ func (r *Reconciler) reconcileDedicatedRepoHost(ctx context.Context, if isCreate { r.Recorder.Eventf(postgresCluster, corev1.EventTypeNormal, EventRepoHostCreated, - "created pgBackRest repository host %s/%s", repoHost.TypeMeta.Kind, repoHostName) + "created pgBackRest repository host %s/%s", repoHost.Kind, repoHostName) } return repoHost, nil @@ -2286,11 +2398,13 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, return nil } - // determine if the dedicated repository host is ready using the repo host ready + // determine if the dedicated repository host is ready (if enabled) using the repo host ready // condition, and return if not - repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) - if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { - return nil + if pgbackrest.RepoHostVolumeDefined(postgresCluster) { + repoCondition := meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) + if repoCondition == nil || repoCondition.Status != metav1.ConditionTrue { + return nil + } } // Determine if the replica create backup is complete and return if not. This allows for proper @@ -2356,7 +2470,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, backupJob := &batchv1.Job{} backupJob.ObjectMeta = naming.PGBackRestBackupJob(postgresCluster) if currentBackupJob != nil { - backupJob.ObjectMeta.Name = currentBackupJob.ObjectMeta.Name + backupJob.Name = currentBackupJob.Name } var labels, annotations map[string]string @@ -2369,10 +2483,10 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, map[string]string{ naming.PGBackRestBackup: manualAnnotation, }) - backupJob.ObjectMeta.Labels = labels - backupJob.ObjectMeta.Annotations = annotations + backupJob.Labels = labels + backupJob.Annotations = annotations - spec := generateBackupJobSpecIntent(ctx, postgresCluster, repo, + spec := r.generateBackupJobSpecIntent(ctx, postgresCluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) 
backupJob.Spec = *spec @@ -2462,6 +2576,17 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, replicaRepoReady = (condition.Status == metav1.ConditionTrue) } + // TODO: Since we now only exec into the repo host when backing up to a local volume and + // run the backup in the job pod when backing up to a cloud-based repo, we should consider + // using a different value than the container name for the "pgbackrest-config" annotation + // that we attach to these backups + var containerName string + if replicaCreateRepo.Volume != nil { + containerName = naming.PGBackRestRepoContainerName + } else { + containerName = naming.ContainerDatabase + } + // determine if the dedicated repository host is ready using the repo host ready status var dedicatedRepoReady bool condition = meta.FindStatusCondition(postgresCluster.Status.Conditions, ConditionRepoHostReady) @@ -2488,10 +2613,14 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // - The job has failed. The Job will be deleted and recreated to try again. // - The replica creation repo has changed since the Job was created. Delete and recreate // with the Job with the proper repo configured. + // - The "config" annotation has changed, indicating there is a new primary. Delete and + // recreate the Job with the proper config mounted (applicable when a dedicated repo + // host is not enabled). // - The "config hash" annotation has changed, indicating a configuration change has been // made in the spec (specifically a change to the config for an external repo). Delete // and recreate the Job with proper hash per the current config. if failed || replicaCreateRepoChanged || + (job.GetAnnotations()[naming.PGBackRestCurrentConfig] != containerName) || (job.GetAnnotations()[naming.PGBackRestConfigHash] != configHash) { if err := r.Client.Delete(ctx, job, client.PropagationPolicy(metav1.DeletePropagationBackground)); err != nil { @@ -2509,7 +2638,8 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // return if no job has been created and the replica repo or the dedicated // repo host is not ready - if job == nil && (!dedicatedRepoReady || !replicaRepoReady) { + if job == nil && ((pgbackrest.RepoHostVolumeDefined(postgresCluster) && !dedicatedRepoReady) || + !replicaRepoReady) { return nil } @@ -2517,7 +2647,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, backupJob := &batchv1.Job{} backupJob.ObjectMeta = naming.PGBackRestBackupJob(postgresCluster) if job != nil { - backupJob.ObjectMeta.Name = job.ObjectMeta.Name + backupJob.Name = job.Name } var labels, annotations map[string]string @@ -2528,12 +2658,13 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, annotations = naming.Merge(postgresCluster.Spec.Metadata.GetAnnotationsOrNil(), postgresCluster.Spec.Backups.PGBackRest.Metadata.GetAnnotationsOrNil(), map[string]string{ - naming.PGBackRestConfigHash: configHash, + naming.PGBackRestCurrentConfig: containerName, + naming.PGBackRestConfigHash: configHash, }) - backupJob.ObjectMeta.Labels = labels - backupJob.ObjectMeta.Annotations = annotations + backupJob.Labels = labels + backupJob.Annotations = annotations - spec := generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, + spec := r.generateBackupJobSpecIntent(ctx, postgresCluster, replicaCreateRepo, serviceAccount.GetName(), labels, annotations) backupJob.Spec = *spec @@ -2698,7 +2829,7 @@ func (r *Reconciler) reconcileStanzaCreate(ctx context.Context, } // Don't record 
event or return an error if configHashMismatch is true, since this just means // configuration changes in ConfigMaps/Secrets have not yet propagated to the container. - // Therefore, just log an an info message and return an error to requeue and try again. + // Therefore, just log an info message and return an error to requeue and try again. if configHashMismatch { return true, nil @@ -2960,7 +3091,7 @@ func (r *Reconciler) reconcilePGBackRestCronJob( // set backup type (i.e. "full", "diff", "incr") backupOpts := []string{"--type=" + backupType} - jobSpec := generateBackupJobSpecIntent(ctx, cluster, repo, + jobSpec := r.generateBackupJobSpecIntent(ctx, cluster, repo, serviceAccount.GetName(), labels, annotations, backupOpts...) // Suspend cronjobs when shutdown or read-only. Any jobs that have already diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 8e34dabb5e..74ac58bc39 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -37,11 +37,13 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + controllerruntime "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pki" "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/events" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -331,6 +333,8 @@ schedulerName: default-scheduler securityContext: fsGroup: 26 fsGroupChangePolicy: OnRootMismatch +serviceAccount: hippocluster-repohost +serviceAccountName: hippocluster-repohost shareProcessNamespace: true terminationGracePeriodSeconds: 30 tolerations: @@ -464,9 +468,10 @@ topologySpreadConstraints: var instanceConfFound, dedicatedRepoConfFound bool for k, v := range config.Data { if v != "" { - if k == pgbackrest.CMInstanceKey { + switch k { + case pgbackrest.CMInstanceKey: instanceConfFound = true - } else if k == pgbackrest.CMRepoKey { + case pgbackrest.CMRepoKey: dedicatedRepoConfFound = true } } @@ -727,6 +732,42 @@ func TestReconcilePGBackRestRBAC(t *testing.T) { assert.Assert(t, foundSubject) } +func TestReconcileRepoHostRBAC(t *testing.T) { + // Garbage collector cleans up test resources before the test completes + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("USE_EXISTING_CLUSTER: Test fails due to garbage collection") + } + + ctx := context.Background() + _, tClient := setupKubernetes(t) + require.ParallelCapacity(t, 0) + + r := &Reconciler{Client: tClient, Owner: client.FieldOwner(t.Name())} + + clusterName := "hippocluster" + clusterUID := "hippouid" + + ns := setupNamespace(t, tClient) + + // create a PostgresCluster to test with + postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) + postgresCluster.Status.PGBackRest = &v1beta1.PGBackRestStatus{ + Repos: []v1beta1.RepoStatus{{Name: "repo1", StanzaCreated: false}}, + } 
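The PGBackRestCurrentConfig annotation introduced above records which container's configuration a replica-create backup Job was built against: the repo host container for volume repos, the database container for cloud repos. When the stored value no longer matches, for instance because a new primary was elected and no dedicated repo host is enabled, the Job is deleted and recreated. A condensed sketch of that comparison; the constant values stand in for naming.PGBackRestRepoContainerName ("pgbackrest") and naming.ContainerDatabase ("database"), both visible elsewhere in this diff:

package main

import "fmt"

// Stand-ins for naming.PGBackRestRepoContainerName and naming.ContainerDatabase.
const (
	repoContainer     = "pgbackrest"
	databaseContainer = "database"
)

// currentConfig mirrors the containerName selection in reconcileReplicaCreateBackup.
func currentConfig(repoHasVolume bool) string {
	if repoHasVolume {
		return repoContainer
	}
	return databaseContainer
}

// needsRecreate reports whether an existing backup Job was generated against a
// different configuration than the cluster now requires.
func needsRecreate(jobAnnotation string, repoHasVolume bool) bool {
	return jobAnnotation != currentConfig(repoHasVolume)
}

func main() {
	fmt.Println(needsRecreate("pgbackrest", true))  // false: annotation still matches
	fmt.Println(needsRecreate("pgbackrest", false)) // true: repo moved to cloud config
}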
+ + serviceAccount, err := r.reconcileRepoHostRBAC(ctx, postgresCluster) + assert.NilError(t, err) + assert.Assert(t, serviceAccount != nil) + + // verify the service account has been created + sa := &corev1.ServiceAccount{} + err = tClient.Get(ctx, types.NamespacedName{ + Name: naming.RepoHostRBAC(postgresCluster).Name, + Namespace: postgresCluster.GetNamespace(), + }, sa) + assert.NilError(t, err) +} + func TestReconcileStanzaCreate(t *testing.T) { cfg, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) @@ -755,7 +796,7 @@ func TestReconcileStanzaCreate(t *testing.T) { instances := newObservedInstances(postgresCluster, nil, []corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"status": `"role":"master"`}, + Annotations: map[string]string{"status": `"role":"primary"`}, Labels: map[string]string{ naming.LabelCluster: postgresCluster.GetName(), naming.LabelInstance: "", @@ -871,7 +912,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } instances := newObservedInstances(postgresCluster, nil, []corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ - Annotations: map[string]string{"status": `"role":"master"`}, + Annotations: map[string]string{"status": `"role":"primary"`}, Labels: map[string]string{ naming.LabelCluster: postgresCluster.GetName(), naming.LabelInstance: "", @@ -922,7 +963,7 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { var foundOwnershipRef bool // verify ownership refs - for _, ref := range backupJob.ObjectMeta.GetOwnerReferences() { + for _, ref := range backupJob.GetOwnerReferences() { if ref.Name == clusterName { foundOwnershipRef = true break @@ -930,13 +971,17 @@ func TestReconcileReplicaCreateBackup(t *testing.T) { } assert.Assert(t, foundOwnershipRef) - var foundHashAnnotation bool + var foundConfigAnnotation, foundHashAnnotation bool // verify annotations for k, v := range backupJob.GetAnnotations() { + if k == naming.PGBackRestCurrentConfig && v == naming.PGBackRestRepoContainerName { + foundConfigAnnotation = true + } if k == naming.PGBackRestConfigHash && v == configHash { foundHashAnnotation = true } } + assert.Assert(t, foundConfigAnnotation) assert.Assert(t, foundHashAnnotation) // verify container & env vars @@ -1353,7 +1398,7 @@ func TestReconcileManualBackup(t *testing.T) { instances.forCluster[0].Pods[0].Annotations = map[string]string{} } else { instances.forCluster[0].Pods[0].Annotations = map[string]string{ - "status": `"role":"master"`, + "status": `"role":"primary"`, } } @@ -1635,7 +1680,14 @@ func TestGetPGBackRestResources(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: corev1.PodSpec{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, }, @@ -1658,11 +1710,54 @@ func TestGetPGBackRestResources(t *testing.T) { jobCount: 0, pvcCount: 0, hostCount: 1, }, }, { - desc: "no dedicated repo host defined, dedicated sts not deleted", + desc: "no dedicated repo host defined delete dedicated sts", + createResources: []client.Object{ + &appsv1.StatefulSet{ + ObjectMeta: metav1.ObjectMeta{ + Name: "delete-dedicated", + Namespace: namespace, + Labels: naming.PGBackRestDedicatedLabels(clusterName), + }, + Spec: appsv1.StatefulSetSpec{ + Selector: metav1.SetAsLabelSelector( + naming.PGBackRestDedicatedLabels(clusterName)), + Template: corev1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: naming.PGBackRestDedicatedLabels(clusterName), + }, + Spec: corev1.PodSpec{ + 
Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, + }, + }, + }, + }, + cluster: &v1beta1.PostgresCluster{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Namespace: namespace, + UID: types.UID(clusterUID), + }, + Spec: v1beta1.PostgresClusterSpec{ + Backups: v1beta1.Backups{ + PGBackRest: v1beta1.PGBackRestArchive{}, + }, + }, + }, + result: testResult{ + jobCount: 0, pvcCount: 0, hostCount: 0, + }, + }, { + desc: "no repo host defined delete dedicated sts", createResources: []client.Object{ &appsv1.StatefulSet{ ObjectMeta: metav1.ObjectMeta{ - Name: "keep-dedicated-two", + Name: "delete-dedicated-no-repo-host", Namespace: namespace, Labels: naming.PGBackRestDedicatedLabels(clusterName), }, @@ -1673,7 +1768,14 @@ func TestGetPGBackRestResources(t *testing.T) { ObjectMeta: metav1.ObjectMeta{ Labels: naming.PGBackRestDedicatedLabels(clusterName), }, - Spec: corev1.PodSpec{}, + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + { + Name: "some-container", + Image: "some-image", + }, + }, + }, }, }, }, @@ -1691,8 +1793,7 @@ func TestGetPGBackRestResources(t *testing.T) { }, }, result: testResult{ - // Host count is 2 due to previous repo host sts not being deleted. - jobCount: 0, pvcCount: 0, hostCount: 2, + jobCount: 0, pvcCount: 0, hostCount: 0, }, }} @@ -1739,6 +1840,9 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount, jobCount, pvcCount int invalidSourceRepo, invalidSourceCluster, invalidOptions bool expectedClusterCondition *metav1.Condition + expectedEventMessage string + expectedCommandPieces []string + missingCommandPieces []string } for _, dedicated := range []bool{true, false} { @@ -1761,6 +1865,8 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 1, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: nil, + expectedCommandPieces: []string{"--stanza=", "--pg1-path=", "--repo=", "--delta"}, + missingCommandPieces: []string{"--target-action"}, }, }, { desc: "invalid source cluster", @@ -1774,6 +1880,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 0, jobCount: 0, pvcCount: 0, invalidSourceRepo: false, invalidSourceCluster: true, invalidOptions: false, expectedClusterCondition: nil, + expectedEventMessage: "does not exist", }, }, { desc: "invalid source repo", @@ -1787,6 +1894,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 0, pvcCount: 0, invalidSourceRepo: true, invalidSourceCluster: false, invalidOptions: false, expectedClusterCondition: nil, + expectedEventMessage: "does not have a repo named", }, }, { desc: "invalid option: --repo=", @@ -1801,6 +1909,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, + expectedEventMessage: "Option '--repo' is not allowed: please use the 'repoName' field instead.", }, }, { desc: "invalid option: --repo ", @@ -1815,6 +1924,7 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, + expectedEventMessage: "Option '--repo' is not allowed: please use the 'repoName' field instead.", }, }, { desc: "invalid option: stanza", @@ -1829,6 +1939,7 @@ func 
TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, + expectedEventMessage: "Option '--stanza' is not allowed: the operator will automatically set this option", }, }, { desc: "invalid option: pg1-path", @@ -1843,6 +1954,68 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { configCount: 1, jobCount: 0, pvcCount: 1, invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, expectedClusterCondition: nil, + expectedEventMessage: "Option '--pg1-path' is not allowed: the operator will automatically set this option", + }, + }, { + desc: "invalid option: target-action", + dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: "invalid-target-action-option", RepoName: "repo1", + Options: []string{"--target-action"}, + }}, + clusterBootstrapped: false, + sourceClusterName: "invalid-target-action-option", + sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, + result: testResult{ + configCount: 1, jobCount: 0, pvcCount: 1, + invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, + expectedClusterCondition: nil, + expectedEventMessage: "Option '--target-action' is not allowed: the operator will automatically set this option", + }, + }, { + desc: "invalid option: link-map", + dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: "invalid-link-map-option", RepoName: "repo1", + Options: []string{"--link-map"}, + }}, + clusterBootstrapped: false, + sourceClusterName: "invalid-link-map-option", + sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, + result: testResult{ + configCount: 1, jobCount: 0, pvcCount: 1, + invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: true, + expectedClusterCondition: nil, + expectedEventMessage: "Option '--link-map' is not allowed: the operator will automatically set this option", + }, + }, { + desc: "valid option: target-timeline", + dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: "valid-target-timeline-option", RepoName: "repo1", + Options: []string{"--target-timeline=1"}, + }}, + clusterBootstrapped: false, + sourceClusterName: "valid-target-timeline-option", + sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, + result: testResult{ + configCount: 1, jobCount: 1, pvcCount: 1, + invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, + expectedClusterCondition: nil, + expectedCommandPieces: []string{"--stanza=", "--pg1-path=", "--repo=", "--delta", "--target-timeline=1"}, + missingCommandPieces: []string{"--target=", "--target-action=promote"}, + }, + }, { + desc: "valid option: target", + dataSource: &v1beta1.DataSource{PostgresCluster: &v1beta1.PostgresClusterDataSource{ + ClusterName: "valid-target-option", RepoName: "repo1", + Options: []string{"--target=some-date"}, + }}, + clusterBootstrapped: false, + sourceClusterName: "valid-target-option", + sourceClusterRepos: []v1beta1.PGBackRestRepo{{Name: "repo1"}}, + result: testResult{ + configCount: 1, jobCount: 1, pvcCount: 1, + invalidSourceRepo: false, invalidSourceCluster: false, invalidOptions: false, + expectedClusterCondition: nil, + expectedCommandPieces: []string{"--stanza=", "--pg1-path=", "--repo=", "--delta", "--target=some-date", "--target-action=promote"}, }, }, { desc: "cluster bootstrapped 
init condition missing", @@ -1965,6 +2138,16 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { if len(restoreJobs.Items) == 1 { assert.Assert(t, restoreJobs.Items[0].Labels[naming.LabelStartupInstance] != "") assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestConfigHash] != "") + for _, cmd := range tc.result.expectedCommandPieces { + assert.Assert(t, cmp.Contains( + strings.Join(restoreJobs.Items[0].Spec.Template.Spec.Containers[0].Command, " "), + cmd)) + } + for _, cmd := range tc.result.missingCommandPieces { + assert.Assert(t, !strings.Contains( + strings.Join(restoreJobs.Items[0].Spec.Template.Spec.Containers[0].Command, " "), + cmd)) + } } dataPVCs := &corev1.PersistentVolumeClaimList{} @@ -2002,7 +2185,11 @@ func TestReconcilePostgresClusterDataSource(t *testing.T) { "involvedObject.namespace": namespace, "reason": "InvalidDataSource", }) - return len(events.Items) == 1, err + eventExists := len(events.Items) > 0 + if eventExists { + assert.Assert(t, cmp.Contains(events.Items[0].Message, tc.result.expectedEventMessage)) + } + return eventExists, err })) } }) @@ -2438,10 +2625,90 @@ func TestCopyConfigurationResources(t *testing.T) { } func TestGenerateBackupJobIntent(t *testing.T) { + _, cc := setupKubernetes(t) + require.ParallelCapacity(t, 0) + ns := setupNamespace(t, cc) + + r := &Reconciler{ + Client: cc, + Owner: ControllerName, + } + ctx := context.Background() + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo-test" + cluster.Default() + + // If repo.Volume is nil, the code interprets this as a cloud repo backup, + // therefore, an "empty" input results in a job spec for a cloud repo backup t.Run("empty", func(t *testing.T) { - spec := generateBackupJobSpecIntent(ctx, - &v1beta1.PostgresCluster{}, v1beta1.PGBackRestRepo{}, + spec := r.generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp + `)) + }) + + t.Run("volumeRepo", func(t *testing.T) { + spec := r.generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{ + Volume: &v1beta1.RepoPVC{ + VolumeClaimSpec: corev1.PersistentVolumeClaimSpec{}, + }, + }, "", nil, nil, ) @@ -2460,7 +2727,7 @@ containers: value: pgbackrest - name: NAMESPACE - name: SELECTOR - value: postgres-operator.crunchydata.com/cluster=,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= + value: 
postgres-operator.crunchydata.com/cluster=hippo-test,postgres-operator.crunchydata.com/pgbackrest=,postgres-operator.crunchydata.com/pgbackrest-dedicated= name: pgbackrest resources: {} securityContext: @@ -2493,7 +2760,7 @@ volumes: path: config-hash - key: pgbackrest-server.conf path: ~postgres-operator_server.conf - name: -pgbackrest-config + name: hippo-test-pgbackrest-config - secret: items: - key: pgbackrest.ca-roots @@ -2503,7 +2770,7 @@ volumes: - key: pgbackrest-client.key mode: 384 path: ~postgres-operator/client-tls.key - name: -pgbackrest + name: hippo-test-pgbackrest `)) }) @@ -2513,7 +2780,7 @@ volumes: ImagePullPolicy: corev1.PullAlways, }, } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2528,7 +2795,7 @@ volumes: cluster.Spec.Backups = v1beta1.Backups{ PGBackRest: v1beta1.PGBackRestArchive{}, } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2545,7 +2812,7 @@ volumes: }, }, } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2584,7 +2851,7 @@ volumes: }, }, } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2597,7 +2864,7 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ PriorityClassName: initialize.String("some-priority-class"), } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2615,7 +2882,7 @@ volumes: cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{ Tolerations: tolerations, } - job := generateBackupJobSpecIntent(ctx, + job := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, @@ -2629,14 +2896,14 @@ volumes: t.Run("Undefined", func(t *testing.T) { cluster.Spec.Backups.PGBackRest.Jobs = nil - spec := generateBackupJobSpecIntent(ctx, + spec := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) cluster.Spec.Backups.PGBackRest.Jobs = &v1beta1.BackupJobs{} - spec = generateBackupJobSpecIntent(ctx, + spec = r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) assert.Assert(t, spec.TTLSecondsAfterFinished == nil) @@ -2647,7 +2914,7 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(0), } - spec := generateBackupJobSpecIntent(ctx, + spec := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { @@ -2660,7 +2927,7 @@ volumes: TTLSecondsAfterFinished: initialize.Int32(100), } - spec := generateBackupJobSpecIntent(ctx, + spec := r.generateBackupJobSpecIntent(ctx, cluster, v1beta1.PGBackRestRepo{}, "", nil, nil, ) if assert.Check(t, spec.TTLSecondsAfterFinished != nil) { @@ -2668,6 +2935,164 @@ volumes: } }) }) + + t.Run("CloudLogVolumeAnnotationNoPvc", func(t *testing.T) { + recorder := events.NewRecorder(t, controllerruntime.Scheme) + r.Recorder = recorder + + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "some-pvc" + spec := r.generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, 
` +containers: +- command: + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp + `)) + + assert.Equal(t, len(recorder.Events), 1) + assert.Equal(t, recorder.Events[0].Regarding.Name, cluster.Name) + assert.Equal(t, recorder.Events[0].Reason, "PGBackRestCloudLogVolumeNotFound") + assert.Equal(t, recorder.Events[0].Note, "persistentvolumeclaims \"some-pvc\" not found") + }) + + t.Run("CloudLogVolumeAnnotationPvcInPlace", func(t *testing.T) { + recorder := events.NewRecorder(t, controllerruntime.Scheme) + r.Recorder = recorder + + cluster.Namespace = ns.Name + cluster.Annotations = map[string]string{} + cluster.Annotations[naming.PGBackRestCloudLogVolume] = "another-pvc" + + pvc := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "another-pvc", + Namespace: ns.Name, + }, + Spec: testVolumeClaimSpec(), + } + err := r.Client.Create(ctx, pvc) + assert.NilError(t, err) + + spec := r.generateBackupJobSpecIntent(ctx, + &cluster, v1beta1.PGBackRestRepo{}, + "", + nil, nil, + ) + assert.Assert(t, cmp.MarshalMatches(spec.Template.Spec, ` +containers: +- command: + - /bin/pgbackrest + - backup + - --stanza=db + - --repo= + name: pgbackrest + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + privileged: false + readOnlyRootFilesystem: true + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + - mountPath: /volumes/another-pvc + name: another-pvc +enableServiceLinks: false +restartPolicy: Never +securityContext: + fsGroup: 26 + fsGroupChangePolicy: OnRootMismatch +volumes: +- name: pgbackrest-config + projected: + sources: + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-test-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-test-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp +- name: another-pvc + persistentVolumeClaim: + claimName: another-pvc + `)) + + // No events created + assert.Equal(t, len(recorder.Events), 0) + }) } func TestGenerateRepoHostIntent(t *testing.T) { @@ -2679,12 +3104,12 @@ func TestGenerateRepoHostIntent(t *testing.T) { t.Run("empty", func(t *testing.T) { _, err := r.generateRepoHostIntent(ctx, &v1beta1.PostgresCluster{}, 
"", &RepoResources{}, - &observedInstances{}) + &observedInstances{}, "") assert.NilError(t, err) }) cluster := &v1beta1.PostgresCluster{} - sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, &observedInstances{}) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, &observedInstances{}, "") assert.NilError(t, err) t.Run("ServiceAccount", func(t *testing.T) { @@ -2705,7 +3130,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{Pods: []*corev1.Pod{{}}}}} - sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed, "") assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(1)) }) @@ -2717,7 +3142,7 @@ func TestGenerateRepoHostIntent(t *testing.T) { }, } observed := &observedInstances{forCluster: []*Instance{{}}} - sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed) + sts, err := r.generateRepoHostIntent(ctx, cluster, "", &RepoResources{}, observed, "") assert.NilError(t, err) assert.Equal(t, *sts.Spec.Replicas, int32(0)) }) @@ -2805,11 +3230,11 @@ func TestGenerateRestoreJobIntent(t *testing.T) { t.Run(fmt.Sprintf("openshift-%v", openshift), func(t *testing.T) { t.Run("ObjectMeta", func(t *testing.T) { t.Run("Name", func(t *testing.T) { - assert.Equal(t, job.ObjectMeta.Name, + assert.Equal(t, job.Name, naming.PGBackRestRestoreJob(cluster).Name) }) t.Run("Namespace", func(t *testing.T) { - assert.Equal(t, job.ObjectMeta.Namespace, + assert.Equal(t, job.Namespace, naming.PGBackRestRestoreJob(cluster).Namespace) }) t.Run("Annotations", func(t *testing.T) { @@ -2965,14 +3390,23 @@ func TestObserveRestoreEnv(t *testing.T) { }, } + currentTime := metav1.Now() + startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) + restoreJob.Status.StartTime = &startTime + if completed != nil { if *completed { + restoreJob.Status.CompletionTime = ¤tTime restoreJob.Status.Conditions = append(restoreJob.Status.Conditions, batchv1.JobCondition{ Type: batchv1.JobComplete, Status: corev1.ConditionTrue, Reason: "test", Message: "test", - }) + }, + batchv1.JobCondition{ + Type: batchv1.JobSuccessCriteriaMet, + Status: corev1.ConditionTrue, + }) } else { restoreJob.Status.Conditions = append(restoreJob.Status.Conditions, batchv1.JobCondition{ Type: batchv1.JobComplete, @@ -2988,7 +3422,12 @@ func TestObserveRestoreEnv(t *testing.T) { Status: corev1.ConditionTrue, Reason: "test", Message: "test", - }) + }, + batchv1.JobCondition{ + Type: batchv1.JobFailureTarget, + Status: corev1.ConditionTrue, + }, + ) } else { restoreJob.Status.Conditions = append(restoreJob.Status.Conditions, batchv1.JobCondition{ Type: batchv1.JobFailed, @@ -3018,15 +3457,15 @@ func TestObserveRestoreEnv(t *testing.T) { createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = 
namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) job := generateJob(cluster.Name, initialize.Bool(false), initialize.Bool(false)) @@ -3042,15 +3481,15 @@ func TestObserveRestoreEnv(t *testing.T) { createResources: func(t *testing.T, cluster *v1beta1.PostgresCluster) { fakeLeaderEP := &corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeLeaderEP)) fakeDCSEP := &corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeDCSEP)) fakeFailoverEP := &corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, fakeFailoverEP)) }, result: testResult{ @@ -3220,15 +3659,15 @@ func TestPrepareForRestore(t *testing.T) { cluster *v1beta1.PostgresCluster) (*batchv1.Job, []corev1.Endpoints) { fakeLeaderEP := corev1.Endpoints{} fakeLeaderEP.ObjectMeta = naming.PatroniLeaderEndpoints(cluster) - fakeLeaderEP.ObjectMeta.Namespace = namespace + fakeLeaderEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeLeaderEP)) fakeDCSEP := corev1.Endpoints{} fakeDCSEP.ObjectMeta = naming.PatroniDistributedConfiguration(cluster) - fakeDCSEP.ObjectMeta.Namespace = namespace + fakeDCSEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeDCSEP)) fakeFailoverEP := corev1.Endpoints{} fakeFailoverEP.ObjectMeta = naming.PatroniTrigger(cluster) - fakeFailoverEP.ObjectMeta.Namespace = namespace + fakeFailoverEP.Namespace = namespace assert.NilError(t, r.Client.Create(ctx, &fakeFailoverEP)) return nil, []corev1.Endpoints{fakeLeaderEP, fakeDCSEP, fakeFailoverEP} }, diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index 76207fac02..35053d0f99 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ b/internal/controller/postgrescluster/pgbouncer.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index 9bbced5247..e18388882c 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,10 +6,10 @@ package postgrescluster import ( "context" + "errors" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" @@ -105,12 +105,12 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. 
- assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "pg7", "postgres-operator.crunchydata.com/role": "pgbouncer", @@ -136,13 +136,13 @@ ownerReferences: assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, service.Annotations, map[string]string{ "a": "v1", "c": "v3", }) // Labels present in the metadata. - assert.DeepEqual(t, service.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, service.Labels, map[string]string{ "b": "v2", "d": "v4", "postgres-operator.crunchydata.com/cluster": "pg7", @@ -420,12 +420,12 @@ namespace: ns3 assert.Assert(t, specified) // Annotations present in the metadata. - assert.DeepEqual(t, deploy.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, deploy.Annotations, map[string]string{ "a": "v1", }) // Labels present in the metadata. - assert.DeepEqual(t, deploy.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, deploy.Labels, map[string]string{ "b": "v2", "postgres-operator.crunchydata.com/cluster": "test-cluster", "postgres-operator.crunchydata.com/role": "pgbouncer", diff --git a/internal/controller/postgrescluster/pgmonitor.go b/internal/controller/postgrescluster/pgmonitor.go index e1b5186cb4..a4a6ff64f0 100644 --- a/internal/controller/postgrescluster/pgmonitor.go +++ b/internal/controller/postgrescluster/pgmonitor.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -346,7 +346,7 @@ func addPGMonitorExporterToInstancePodSpec( }, }, } - configVolume.VolumeSource.Projected.Sources = append(configVolume.VolumeSource.Projected.Sources, + configVolume.Projected.Sources = append(configVolume.Projected.Sources, defaultConfigVolumeProjection) } diff --git a/internal/controller/postgrescluster/pgmonitor_test.go b/internal/controller/postgrescluster/pgmonitor_test.go index 0432ee15d1..36a5027aaa 100644 --- a/internal/controller/postgrescluster/pgmonitor_test.go +++ b/internal/controller/postgrescluster/pgmonitor_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -358,7 +358,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, DeletionTimestamp: &metav1.Time{}, }, }}, @@ -388,7 +388,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, }}, Runner: &appsv1.StatefulSet{}, @@ -410,7 +410,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ @@ -438,7 +438,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ @@ -469,7 +469,7 @@ func TestReconcilePGMonitorExporterSetupErrors(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ @@ -536,7 +536,7 @@ func TestReconcilePGMonitorExporter(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "one-daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, Status: corev1.PodStatus{ Phase: corev1.PodRunning, @@ -602,7 +602,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { podExecCalled: false, // Status was generated manually for this test case // TODO (jmckulk): add code to generate status - status: v1beta1.MonitoringStatus{ExporterConfiguration: "7cdb484b6c"}, + status: v1beta1.MonitoringStatus{ExporterConfiguration: "5c5f955485"}, statusChangedAfterReconcile: false, }} { t.Run(test.name, func(t *testing.T) { @@ -634,7 +634,7 @@ func TestReconcilePGMonitorExporterStatus(t *testing.T) { Pods: []*corev1.Pod{{ ObjectMeta: metav1.ObjectMeta{ Name: "daisy-pod", - Annotations: map[string]string{"status": `{"role":"master"}`}, + Annotations: map[string]string{"status": `{"role":"primary"}`}, }, Status: corev1.PodStatus{ ContainerStatuses: []corev1.ContainerStatus{{ diff --git a/internal/controller/postgrescluster/pki.go b/internal/controller/postgrescluster/pki.go index 0314ad4406..d52d6a75da 100644 --- a/internal/controller/postgrescluster/pki.go +++ b/internal/controller/postgrescluster/pki.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -63,7 +63,7 @@ func (r *Reconciler) reconcileRootCertificate( intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Namespace, intent.Name = cluster.Namespace, naming.RootCertSecret intent.Data = make(map[string][]byte) - intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences + intent.OwnerReferences = existing.OwnerReferences // A root secret is scoped to the namespace where postgrescluster(s) // are deployed. For operator deployments with postgresclusters in more than @@ -140,7 +140,7 @@ func (r *Reconciler) reconcileClusterCertificate( intent := &corev1.Secret{ObjectMeta: naming.PostgresTLSSecret(cluster)} intent.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) intent.Data = make(map[string][]byte) - intent.ObjectMeta.OwnerReferences = existing.ObjectMeta.OwnerReferences + intent.OwnerReferences = existing.OwnerReferences intent.Annotations = naming.Merge(cluster.Spec.Metadata.GetAnnotationsOrNil()) intent.Labels = naming.Merge( diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index c2fe7af82a..0cb5f15a99 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -12,7 +12,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -90,7 +89,7 @@ func TestReconcileCerts(t *testing.T) { err := tClient.Get(ctx, client.ObjectKeyFromObject(rootSecret), rootSecret) assert.NilError(t, err) - assert.Check(t, len(rootSecret.ObjectMeta.OwnerReferences) == 1, "first owner reference not set") + assert.Check(t, len(rootSecret.OwnerReferences) == 1, "first owner reference not set") expectedOR := metav1.OwnerReference{ APIVersion: "postgres-operator.crunchydata.com/v1beta1", @@ -99,8 +98,8 @@ func TestReconcileCerts(t *testing.T) { UID: cluster1.UID, } - if len(rootSecret.ObjectMeta.OwnerReferences) > 0 { - assert.Equal(t, rootSecret.ObjectMeta.OwnerReferences[0], expectedOR) + if len(rootSecret.OwnerReferences) > 0 { + assert.Equal(t, rootSecret.OwnerReferences[0], expectedOR) } }) @@ -115,7 +114,7 @@ func TestReconcileCerts(t *testing.T) { clist := &v1beta1.PostgresClusterList{} assert.NilError(t, tClient.List(ctx, clist)) - assert.Check(t, len(rootSecret.ObjectMeta.OwnerReferences) == 2, "second owner reference not set") + assert.Check(t, len(rootSecret.OwnerReferences) == 2, "second owner reference not set") expectedOR := metav1.OwnerReference{ APIVersion: "postgres-operator.crunchydata.com/v1beta1", @@ -124,8 +123,8 @@ func TestReconcileCerts(t *testing.T) { UID: cluster2.UID, } - if len(rootSecret.ObjectMeta.OwnerReferences) > 1 { - assert.Equal(t, rootSecret.ObjectMeta.OwnerReferences[1], expectedOR) + if len(rootSecret.OwnerReferences) > 1 { + assert.Equal(t, rootSecret.OwnerReferences[1], expectedOR) } }) @@ -145,8 +144,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the 
root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -206,7 +204,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret newRootCert, err := r.reconcileRootCertificate(ctx, cluster1) @@ -303,7 +301,7 @@ func TestReconcileCerts(t *testing.T) { testSecret := &corev1.Secret{} testSecret.Namespace, testSecret.Name = namespace, "newcustomsecret" // simulate cluster spec update - cluster2.Spec.CustomTLSSecret.LocalObjectReference.Name = "newcustomsecret" + cluster2.Spec.CustomTLSSecret.Name = "newcustomsecret" // get the expected secret projection testSecretProjection := clusterCertSecretProjection(testSecret) @@ -331,8 +329,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -392,7 +389,7 @@ func getCertFromSecret( // get the cert from the secret secretCRT, ok := secret.Data[dataKey] if !ok { - return nil, errors.New(fmt.Sprintf("could not retrieve %s", dataKey)) + return nil, fmt.Errorf("could not retrieve %s", dataKey) } // parse the cert from binary encoded data diff --git a/internal/controller/postgrescluster/pod_disruption_budget.go b/internal/controller/postgrescluster/pod_disruption_budget.go index 4bff4a9743..80ad33b55e 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget.go +++ b/internal/controller/postgrescluster/pod_disruption_budget.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/pod_disruption_budget_test.go b/internal/controller/postgrescluster/pod_disruption_budget_test.go index 55e2bb63c6..6463068d4c 100644 --- a/internal/controller/postgrescluster/pod_disruption_budget_test.go +++ b/internal/controller/postgrescluster/pod_disruption_budget_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 312079d824..f8987b8332 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -429,7 +429,7 @@ func (r *Reconciler) reconcilePostgresUserSecrets( // If both secrets have "pguser" or neither have "pguser", // sort by creation timestamp - return secrets.Items[i].CreationTimestamp.Time.After(secrets.Items[j].CreationTimestamp.Time) + return secrets.Items[i].CreationTimestamp.After(secrets.Items[j].CreationTimestamp.Time) }) // Index secrets by PostgreSQL user name and delete any that are not in the @@ -502,11 +502,11 @@ func (r *Reconciler) reconcilePostgresUsersInPostgreSQL( running, known := instance.IsRunning(container) if running && known && len(instance.Pods) > 0 { pod := instance.Pods[0] - ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) podExecutor = func( ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + ctx = logging.NewContext(ctx, logging.FromContext(ctx).WithValues("pod", pod.Name)) return r.PodExec(ctx, pod.Namespace, pod.Name, container, stdin, stdout, stderr, command...) } break diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 0780b0f577..9e52bc38c1 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,14 +6,16 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" + "os" + "strings" "testing" "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -282,6 +284,9 @@ volumeMode: Filesystem }) t.Run("DataVolumeSourceClusterWithGoodSnapshot", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } cluster := testCluster() ns := setupNamespace(t, tClient) cluster.Namespace = ns.Name @@ -951,7 +956,7 @@ func TestReconcileDatabaseInitSQL(t *testing.T) { Namespace: ns.Name, Name: "pod", Annotations: map[string]string{ - "status": `{"role":"master"}`, + "status": `{"role":"primary"}`, }, }, Status: corev1.PodStatus{ @@ -1072,7 +1077,7 @@ func TestReconcileDatabaseInitSQLConfigMap(t *testing.T) { Namespace: ns.Name, Name: "pod", Annotations: map[string]string{ - "status": `{"role":"master"}`, + "status": `{"role":"primary"}`, }, }, Status: corev1.PodStatus{ diff --git a/internal/controller/postgrescluster/rbac.go b/internal/controller/postgrescluster/rbac.go index 38dd808c44..55b685d1c3 100644 --- a/internal/controller/postgrescluster/rbac.go +++ b/internal/controller/postgrescluster/rbac.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/snapshots.go b/internal/controller/postgrescluster/snapshots.go index 4f5eff817a..281cab2d39 100644 --- a/internal/controller/postgrescluster/snapshots.go +++ b/internal/controller/postgrescluster/snapshots.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -10,14 +10,13 @@ import ( "strings" "time" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "github.com/pkg/errors" batchv1 "k8s.io/api/batch/v1" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - "github.com/crunchydata/postgres-operator/internal/config" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" @@ -100,7 +99,7 @@ func (r *Reconciler) reconcileVolumeSnapshots(ctx context.Context, r.Recorder.Event(postgrescluster, corev1.EventTypeWarning, "VolumeSnapshotError", *snapshotWithLatestError.Status.Error.Message) for _, snapshot := range snapshots.Items { - if snapshot.Status.Error != nil && + if snapshot.Status != nil && snapshot.Status.Error != nil && snapshot.Status.Error.Time.Before(snapshotWithLatestError.Status.Error.Time) { err = r.deleteControlled(ctx, postgrescluster, &snapshot) if err != nil { @@ -534,7 +533,7 @@ func getSnapshotWithLatestError(snapshots *volumesnapshotv1.VolumeSnapshotList) }, } for _, snapshot := range snapshots.Items { - if snapshot.Status.Error != nil && + if snapshot.Status != nil && snapshot.Status.Error != nil && snapshotWithLatestError.Status.Error.Time.Before(snapshot.Status.Error.Time) { snapshotWithLatestError = snapshot } @@ -574,7 +573,7 @@ func getLatestReadySnapshot(snapshots *volumesnapshotv1.VolumeSnapshotList) *vol }, } for _, snapshot := range snapshots.Items { - if snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse && + if snapshot.Status != nil && snapshot.Status.ReadyToUse != nil && *snapshot.Status.ReadyToUse && latestReadySnapshot.Status.CreationTime.Before(snapshot.Status.CreationTime) { latestReadySnapshot = snapshot } diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index 455b1b1581..b73ad76ded 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -6,10 +6,12 @@ package postgrescluster import ( "context" + "os" + "strings" "testing" "time" - "github.com/pkg/errors" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -28,8 +30,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/testing/events" "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) func TestReconcileVolumeSnapshots(t *testing.T) { @@ -59,7 +59,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create cluster (without snapshots spec) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -72,34 +72,29 @@ func TestReconcileVolumeSnapshots(t *testing.T) { volumeSnapshotClassName := "my-snapshotclass" snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) assert.NilError(t, err) - err = errors.WithStack(r.apply(ctx, snapshot)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) // Get all snapshots for this cluster and assert 1 exists selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) // Reconcile snapshots - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Get all snapshots for this cluster and assert 0 exist - assert.NilError(t, err) snapshots = &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) @@ -131,8 +126,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert warning event was created and has expected attributes if assert.Check(t, len(recorder.Events) > 0) { @@ -173,23 +167,24 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert no snapshots exist selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) t.Run("SnapshotsEnabledReadySnapshotsExist", func(t *testing.T) { + if 
strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create a volume snapshot class volumeSnapshotClassName := "my-snapshotclass" volumeSnapshotClass := &volumesnapshotv1.VolumeSnapshotClass{ @@ -204,7 +199,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create a cluster with snapshots enabled cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } @@ -244,18 +239,15 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) // Update snapshot status truePtr := initialize.Bool(true) snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = r.Client.Status().Update(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot1)) // Create second snapshot with different annotation value snapshot2 := &volumesnapshotv1.VolumeSnapshot{ @@ -279,38 +271,32 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) // Update second snapshot's status snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = r.Client.Status().Update(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot2)) // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert first snapshot exists and second snapshot was deleted selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") // Cleanup - err = r.deleteControlled(ctx, cluster, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.deleteControlled(ctx, cluster, snapshot1)) }) t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { @@ -328,7 +314,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { // Create a cluster with snapshots enabled cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: volumeSnapshotClassName, } @@ -347,19 +333,17 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert that a snapshot was created selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) 
snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], "another-backup-timestamp") @@ -392,7 +376,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) t.Cleanup(func() { assert.Check(t, r.Client.Delete(ctx, cluster)) }) @@ -413,21 +397,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { }, Spec: testVolumeClaimSpec(), } - err = errors.WithStack(r.setControllerReference(cluster, pvc)) - assert.NilError(t, err) - err = r.apply(ctx, pvc) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, pvc)) + assert.NilError(t, r.apply(ctx, pvc)) // Assert that the pvc was created selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) // Create volumes for reconcile @@ -452,7 +433,7 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -471,21 +452,23 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) }) t.Run("SnapshotsEnabledBackupExistsCreateRestore", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -494,18 +477,13 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) currentTime := metav1.Now() - backupJob.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: ¤tTime, - } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + startTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) + backupJob.Status = succeededJobStatus(startTime, currentTime) + 
assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -521,22 +499,24 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(restoreJobs.Items), 1) assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") }) t.Run("SnapshotsEnabledSuccessfulRestoreExists", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -545,38 +525,28 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create times for jobs currentTime := metav1.Now() + currentStartTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + earlierStartTime := metav1.NewTime(earlierTime.AddDate(0, 0, -1)) // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) - backupJob.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: &earlierTime, - } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + backupJob.Status = succeededJobStatus(earlierStartTime, earlierTime) + assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create successful restore job restoreJob := testRestoreJob(cluster) restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) - restoreJob.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: ¤tTime, - } - err = r.Client.Status().Update(ctx, restoreJob) - assert.NilError(t, err) + restoreJob.Status = succeededJobStatus(currentStartTime, currentTime) + assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -592,12 +562,11 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(restoreJobs.Items), 0) // Assert pvc was annotated @@ -605,11 +574,14 @@ func 
TestReconcileDedicatedSnapshotVolume(t *testing.T) { }) t.Run("SnapshotsEnabledFailedRestoreExists", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } // Create cluster with snapshots enabled ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" cluster.Spec.Backups.Snapshots = &v1beta1.VolumeSnapshots{ VolumeSnapshotClassName: "my-snapshotclass", } @@ -619,38 +591,29 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create times for jobs currentTime := metav1.Now() earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + startTime := metav1.NewTime(earlierTime.AddDate(0, 0, -1)) // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) - backupJob.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: &earlierTime, - } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + backupJob.Status = succeededJobStatus(startTime, earlierTime) + assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create failed restore job restoreJob := testRestoreJob(cluster) restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ - Succeeded: 0, - Failed: 1, - CompletionTime: ¤tTime, + Succeeded: 0, + Failed: 1, } - err = r.Client.Status().Update(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -685,7 +648,7 @@ func TestCreateDedicatedSnapshotVolume(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" labelMap := map[string]string{ naming.LabelCluster: cluster.Name, @@ -713,7 +676,7 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { ns := setupNamespace(t, cc) cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" pvc := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ @@ -727,19 +690,17 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { backupJob := testBackupJob(cluster) backupJob.Status.CompletionTime = ¤tTime - err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob)) // Assert a restore job was created that has the correct annotation jobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, jobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, 
err) assert.Equal(t, len(jobs.Items), 1) assert.Equal(t, jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) @@ -801,7 +762,7 @@ func TestGenerateVolumeSnapshot(t *testing.T) { assert.Equal(t, *snapshot.Spec.VolumeSnapshotClassName, "my-snapshot") assert.Equal(t, *snapshot.Spec.Source.PersistentVolumeClaimName, "dedicated-snapshot-volume") assert.Equal(t, snapshot.Labels[naming.LabelCluster], "hippo") - assert.Equal(t, snapshot.ObjectMeta.OwnerReferences[0].Name, "hippo") + assert.Equal(t, snapshot.OwnerReferences[0].Name, "hippo") } func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { @@ -827,9 +788,7 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { t.Run("NoDsvRestoreJobs", func(t *testing.T) { job1 := testRestoreJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -843,16 +802,12 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { job2.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: "backup-timestamp", } - - err := r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) job3 := testRestoreJob(cluster) job3.Name = "restore-job-3" job3.Namespace = ns.Name - - err = r.apply(ctx, job3) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job3)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -864,7 +819,6 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { func TestGetLatestCompleteBackupJob(t *testing.T) { ctx := context.Background() _, cc := setupKubernetes(t) - // require.ParallelCapacity(t, 1) r := &Reconciler{ Client: cc, @@ -884,9 +838,7 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { t.Run("NoCompleteJobs", func(t *testing.T) { job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -894,31 +846,26 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { }) t.Run("OneCompleteBackupJob", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } currentTime := metav1.Now() + currentStartTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) job2 := testBackupJob(cluster) job2.Namespace = ns.Name job2.Name = "backup-job-2" - - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. 
- err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) - job1.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: &currentTime, - } - err = r.Client.Status().Update(ctx, job1) - assert.NilError(t, err) + job1.Status = succeededJobStatus(currentStartTime, currentTime) + assert.NilError(t, r.Client.Status().Update(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -926,44 +873,33 @@ }) t.Run("TwoCompleteBackupJobs", func(t *testing.T) { + if strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") { + t.Skip("requires mocking of Job conditions") + } currentTime := metav1.Now() + currentStartTime := metav1.NewTime(currentTime.AddDate(0, 0, -1)) earlierTime := metav1.NewTime(currentTime.AddDate(-1, 0, 0)) + earlierStartTime := metav1.NewTime(earlierTime.AddDate(0, 0, -1)) assert.Check(t, earlierTime.Before(&currentTime)) job1 := testBackupJob(cluster) job1.Namespace = ns.Name - - err := r.apply(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job1)) job2 := testBackupJob(cluster) job2.Namespace = ns.Name job2.Name = "backup-job-2" - - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) - - job1.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: &currentTime, - } - err = r.Client.Status().Update(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) + job1.Status = succeededJobStatus(currentStartTime, currentTime) + assert.NilError(t, r.Client.Status().Update(ctx, job1)) // Get job2 and update Status.
- err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) - assert.NilError(t, err) - - job2.Status = batchv1.JobStatus{ - Succeeded: 1, - CompletionTime: &earlierTime, - } - err = r.Client.Status().Update(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2)) + job2.Status = succeededJobStatus(earlierStartTime, earlierTime) + assert.NilError(t, r.Client.Status().Update(ctx, job2)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -978,6 +914,17 @@ func TestGetSnapshotWithLatestError(t *testing.T) { assert.Check(t, snapshotWithLatestError == nil) }) + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) + assert.Check(t, snapshotWithLatestError == nil) + }) + t.Run("NoSnapshotsWithErrors", func(t *testing.T) { snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ @@ -1027,7 +974,7 @@ func TestGetSnapshotWithLatestError(t *testing.T) { }, } snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) - assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "bad-snapshot") + assert.Equal(t, snapshotWithLatestError.Name, "bad-snapshot") }) t.Run("TwoSnapshotsWithErrors", func(t *testing.T) { @@ -1062,7 +1009,7 @@ func TestGetSnapshotWithLatestError(t *testing.T) { }, } snapshotWithLatestError := getSnapshotWithLatestError(snapshotList) - assert.Equal(t, snapshotWithLatestError.ObjectMeta.Name, "second-bad-snapshot") + assert.Equal(t, snapshotWithLatestError.Name, "second-bad-snapshot") }) } @@ -1102,8 +1049,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1126,8 +1072,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1144,8 +1089,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1169,8 +1113,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot1.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot1.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1187,8 +1130,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } 
snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1203,6 +1145,17 @@ func TestGetLatestReadySnapshot(t *testing.T) { assert.Assert(t, latestReadySnapshot == nil) }) + t.Run("NoSnapshotsWithStatus", func(t *testing.T) { + snapshotList := &volumesnapshotv1.VolumeSnapshotList{ + Items: []volumesnapshotv1.VolumeSnapshot{ + {}, + {}, + }, + } + latestReadySnapshot := getLatestReadySnapshot(snapshotList) + assert.Assert(t, latestReadySnapshot == nil) + }) + t.Run("NoReadySnapshots", func(t *testing.T) { snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ @@ -1250,7 +1203,7 @@ func TestGetLatestReadySnapshot(t *testing.T) { }, } latestReadySnapshot := getLatestReadySnapshot(snapshotList) - assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "good-snapshot") + assert.Equal(t, latestReadySnapshot.Name, "good-snapshot") }) t.Run("TwoReadySnapshots", func(t *testing.T) { @@ -1281,7 +1234,7 @@ func TestGetLatestReadySnapshot(t *testing.T) { }, } latestReadySnapshot := getLatestReadySnapshot(snapshotList) - assert.Equal(t, latestReadySnapshot.ObjectMeta.Name, "second-good-snapshot") + assert.Equal(t, latestReadySnapshot.Name, "second-good-snapshot") }) } @@ -1300,13 +1253,13 @@ func TestDeleteSnapshots(t *testing.T) { cluster := testCluster() cluster.Namespace = ns.Name - cluster.ObjectMeta.UID = "the-uid-123" + cluster.UID = "the-uid-123" assert.NilError(t, r.Client.Create(ctx, cluster)) rhinoCluster := testCluster() rhinoCluster.Name = "rhino" rhinoCluster.Namespace = ns.Name - rhinoCluster.ObjectMeta.UID = "the-uid-456" + rhinoCluster.UID = "the-uid-456" assert.NilError(t, r.Client.Create(ctx, rhinoCluster)) t.Cleanup(func() { @@ -1337,24 +1290,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, }, } - err = r.deleteSnapshots(ctx, cluster, snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) }) @@ -1375,10 +1324,8 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1395,24 +1342,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, 
r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, *snapshot2, }, } - err = r.deleteSnapshots(ctx, cluster, snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") }) @@ -1452,3 +1395,21 @@ func TestClusterUsingTablespaces(t *testing.T) { assert.Assert(t, clusterUsingTablespaces(ctx, cluster)) }) } + +func succeededJobStatus(startTime, completionTime metav1.Time) batchv1.JobStatus { + return batchv1.JobStatus{ + Succeeded: 1, + StartTime: &startTime, + CompletionTime: &completionTime, + Conditions: []batchv1.JobCondition{ + { + Type: batchv1.JobSuccessCriteriaMet, + Status: corev1.ConditionTrue, + }, + { + Type: batchv1.JobComplete, + Status: corev1.ConditionTrue, + }, + }, + } +} diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index 2a0e3d76ec..d7f8bece32 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -15,9 +15,7 @@ import ( . "github.com/onsi/gomega" "k8s.io/apimachinery/pkg/util/version" "k8s.io/client-go/discovery" - - // Google Kubernetes Engine / Google Cloud Platform authentication provider - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" + _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" // Google Kubernetes Engine / Google Cloud Platform authentication provider "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/envtest" diff --git a/internal/controller/postgrescluster/topology.go b/internal/controller/postgrescluster/topology.go index 58778be907..9f28739267 100644 --- a/internal/controller/postgrescluster/topology.go +++ b/internal/controller/postgrescluster/topology.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/topology_test.go b/internal/controller/postgrescluster/topology_test.go index 40c8c0dd7f..ec5d1fe137 100644 --- a/internal/controller/postgrescluster/topology_test.go +++ b/internal/controller/postgrescluster/topology_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/util.go b/internal/controller/postgrescluster/util.go index 25120ab574..bb5b3e085a 100644 --- a/internal/controller/postgrescluster/util.go +++ b/internal/controller/postgrescluster/util.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/util_test.go b/internal/controller/postgrescluster/util_test.go index 51a32f1e85..8e7d5c434f 100644 --- a/internal/controller/postgrescluster/util_test.go +++ b/internal/controller/postgrescluster/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -79,7 +79,7 @@ func TestAddDevSHM(t *testing.T) { // check there is an empty dir mounted under the dshm volume for _, v := range template.Spec.Volumes { - if v.Name == "dshm" && v.VolumeSource.EmptyDir != nil && v.VolumeSource.EmptyDir.Medium == corev1.StorageMediumMemory { + if v.Name == "dshm" && v.EmptyDir != nil && v.EmptyDir.Medium == corev1.StorageMediumMemory { found = true break } @@ -221,15 +221,14 @@ func TestAddNSSWrapper(t *testing.T) { // Each container that requires the nss_wrapper envs should be updated var actualUpdatedContainerCount int for i, c := range template.Spec.Containers { - if c.Name == naming.ContainerDatabase || - c.Name == naming.PGBackRestRepoContainerName || - c.Name == naming.PGBackRestRestoreContainerName { + switch c.Name { + case naming.ContainerDatabase, naming.PGBackRestRepoContainerName, naming.PGBackRestRestoreContainerName: assert.DeepEqual(t, expectedEnv, c.Env) actualUpdatedContainerCount++ - } else if c.Name == "pgadmin" { + case "pgadmin": assert.DeepEqual(t, expectedPGAdminEnv, c.Env) actualUpdatedContainerCount++ - } else { + default: assert.DeepEqual(t, beforeAddNSS[i], c) } } diff --git a/internal/controller/postgrescluster/volumes.go b/internal/controller/postgrescluster/volumes.go index e40710d4ff..962b8878a4 100644 --- a/internal/controller/postgrescluster/volumes.go +++ b/internal/controller/postgrescluster/volumes.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -220,7 +220,7 @@ func (r *Reconciler) configureExistingPGVolumes( Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, } - volume.ObjectMeta.Labels = map[string]string{ + volume.Labels = map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, naming.LabelInstance: instanceName, @@ -273,7 +273,7 @@ func (r *Reconciler) configureExistingPGWALVolume( Spec: cluster.Spec.InstanceSets[0].DataVolumeClaimSpec, } - volume.ObjectMeta.Labels = map[string]string{ + volume.Labels = map[string]string{ naming.LabelCluster: cluster.Name, naming.LabelInstanceSet: cluster.Spec.InstanceSets[0].Name, naming.LabelInstance: instanceName, @@ -427,14 +427,14 @@ func (r *Reconciler) reconcileMovePGDataDir(ctx context.Context, // at this point, the Job either wasn't found or it has failed, so the it // should be created - moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + moveDirJob.Annotations = naming.Merge(cluster.Spec.Metadata. GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGDataDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels // `patroni.dynamic.json` holds the previous state of the DCS. Since we are // migrating the volumes, we want to clear out any obsolete configuration info. 
@@ -548,14 +548,14 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, } } - moveDirJob.ObjectMeta.Annotations = naming.Merge(cluster.Spec.Metadata. + moveDirJob.Annotations = naming.Merge(cluster.Spec.Metadata. GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGWalDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s volumes for PGO v5.x" echo "pg_wal_pvc=%s" @@ -570,7 +570,7 @@ func (r *Reconciler) reconcileMoveWALDir(ctx context.Context, cluster.Spec.DataSource.Volumes.PGWALVolume.PVCName, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, cluster.Spec.DataSource.Volumes.PGWALVolume.Directory, - cluster.ObjectMeta.Name) + cluster.Name) container := corev1.Container{ Command: []string{"bash", "-ceu", script}, @@ -665,14 +665,14 @@ func (r *Reconciler) reconcileMoveRepoDir(ctx context.Context, } } - moveDirJob.ObjectMeta.Annotations = naming.Merge( + moveDirJob.Annotations = naming.Merge( cluster.Spec.Metadata.GetAnnotationsOrNil()) labels := naming.Merge(cluster.Spec.Metadata.GetLabelsOrNil(), naming.DirectoryMoveJobLabels(cluster.Name), map[string]string{ naming.LabelMovePGBackRestRepoDir: "", }) - moveDirJob.ObjectMeta.Labels = labels + moveDirJob.Labels = labels script := fmt.Sprintf(`echo "Preparing cluster %s pgBackRest repo volume for PGO v5.x" echo "repo_pvc=%s" diff --git a/internal/controller/postgrescluster/volumes_test.go b/internal/controller/postgrescluster/volumes_test.go index 96eef5f916..cb41de1b77 100644 --- a/internal/controller/postgrescluster/volumes_test.go +++ b/internal/controller/postgrescluster/volumes_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/postgrescluster/watches.go b/internal/controller/postgrescluster/watches.go index 0b5ba5fa87..26900cd9c9 100644 --- a/internal/controller/postgrescluster/watches.go +++ b/internal/controller/postgrescluster/watches.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -50,7 +50,7 @@ func (*Reconciler) watchPods() handler.Funcs { } // Queue an event to start applying changes if the PostgreSQL instance - // now has the "master" role. + // now has the "primary" role. if len(cluster) != 0 && !patroni.PodIsPrimary(e.ObjectOld) && patroni.PodIsPrimary(e.ObjectNew) { diff --git a/internal/controller/postgrescluster/watches_test.go b/internal/controller/postgrescluster/watches_test.go index fdea498862..11a4d957de 100644 --- a/internal/controller/postgrescluster/watches_test.go +++ b/internal/controller/postgrescluster/watches_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/client.go b/internal/controller/runtime/client.go index 4cc05c9835..e2dbf4db06 100644 --- a/internal/controller/runtime/client.go +++ b/internal/controller/runtime/client.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/pod_client.go b/internal/controller/runtime/pod_client.go index e842601aa7..a20f92b18b 100644 --- a/internal/controller/runtime/pod_client.go +++ b/internal/controller/runtime/pod_client.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/reconcile.go b/internal/controller/runtime/reconcile.go index a2196d1626..b7d19506d3 100644 --- a/internal/controller/runtime/reconcile.go +++ b/internal/controller/runtime/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/reconcile_test.go b/internal/controller/runtime/reconcile_test.go index 925b3cf47d..cc57650c56 100644 --- a/internal/controller/runtime/reconcile_test.go +++ b/internal/controller/runtime/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/runtime.go b/internal/controller/runtime/runtime.go index 34bfeabf61..9d9e59854c 100644 --- a/internal/controller/runtime/runtime.go +++ b/internal/controller/runtime/runtime.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -7,6 +7,7 @@ package runtime import ( "context" + volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" "k8s.io/client-go/rest" @@ -18,8 +19,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - - volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" ) type ( diff --git a/internal/controller/runtime/ticker.go b/internal/controller/runtime/ticker.go index 830179eafc..f6c981f350 100644 --- a/internal/controller/runtime/ticker.go +++ b/internal/controller/runtime/ticker.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/runtime/ticker_test.go b/internal/controller/runtime/ticker_test.go index 49cecd79d7..e69bdc7ec8 100644 --- a/internal/controller/runtime/ticker_test.go +++ b/internal/controller/runtime/ticker_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/apply.go b/internal/controller/standalone_pgadmin/apply.go index 0eaa613df8..0cc3191967 100644 --- a/internal/controller/standalone_pgadmin/apply.go +++ b/internal/controller/standalone_pgadmin/apply.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -22,7 +22,7 @@ func (r *PGAdminReconciler) patch( patch client.Patch, options ...client.PatchOption, ) error { options = append([]client.PatchOption{r.Owner}, options...) - return r.Client.Patch(ctx, object, patch, options...) + return r.Patch(ctx, object, patch, options...) } // apply sends an apply patch to object's endpoint in the Kubernetes API and diff --git a/internal/controller/standalone_pgadmin/config.go b/internal/controller/standalone_pgadmin/config.go index ddd080985b..3af09144f2 100644 --- a/internal/controller/standalone_pgadmin/config.go +++ b/internal/controller/standalone_pgadmin/config.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/configmap.go b/internal/controller/standalone_pgadmin/configmap.go index d1ec39bf13..150f7ecb12 100644 --- a/internal/controller/standalone_pgadmin/configmap.go +++ b/internal/controller/standalone_pgadmin/configmap.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -12,9 +12,8 @@ import ( "sort" "strconv" - corev1 "k8s.io/api/core/v1" - "github.com/pkg/errors" + corev1 "k8s.io/api/core/v1" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" diff --git a/internal/controller/standalone_pgadmin/configmap_test.go b/internal/controller/standalone_pgadmin/configmap_test.go index 5a844e520c..6e79c4a316 100644 --- a/internal/controller/standalone_pgadmin/configmap_test.go +++ b/internal/controller/standalone_pgadmin/configmap_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -197,12 +197,12 @@ namespace: some-ns assert.NilError(t, err) // Annotations present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Annotations, map[string]string{ + assert.DeepEqual(t, configmap.Annotations, map[string]string{ "a": "v1", "b": "v2", }) // Labels present in the metadata. - assert.DeepEqual(t, configmap.ObjectMeta.Labels, map[string]string{ + assert.DeepEqual(t, configmap.Labels, map[string]string{ "c": "v3", "d": "v4", "postgres-operator.crunchydata.com/pgadmin": "pg1", "postgres-operator.crunchydata.com/role": "pgadmin", diff --git a/internal/controller/standalone_pgadmin/controller.go b/internal/controller/standalone_pgadmin/controller.go index 7e4c43eb9f..d81823a8c2 100644 --- a/internal/controller/standalone_pgadmin/controller.go +++ b/internal/controller/standalone_pgadmin/controller.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -158,7 +158,7 @@ func (r *PGAdminReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct func (r *PGAdminReconciler) setControllerReference( owner *v1beta1.PGAdmin, controlled client.Object, ) error { - return controllerutil.SetControllerReference(owner, controlled, r.Client.Scheme()) + return controllerutil.SetControllerReference(owner, controlled, r.Scheme()) } // deleteControlled safely deletes object when it is controlled by pgAdmin. 
@@ -170,7 +170,7 @@ func (r *PGAdminReconciler) deleteControlled( version := object.GetResourceVersion() exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} - return r.Client.Delete(ctx, object, exactly) + return r.Delete(ctx, object, exactly) } return nil diff --git a/internal/controller/standalone_pgadmin/controller_test.go b/internal/controller/standalone_pgadmin/controller_test.go index b0fe17cbe6..1bd341d54d 100644 --- a/internal/controller/standalone_pgadmin/controller_test.go +++ b/internal/controller/standalone_pgadmin/controller_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/helpers_test.go b/internal/controller/standalone_pgadmin/helpers_test.go index 9096edb5a1..abcb1b5f38 100644 --- a/internal/controller/standalone_pgadmin/helpers_test.go +++ b/internal/controller/standalone_pgadmin/helpers_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/helpers_unit_test.go b/internal/controller/standalone_pgadmin/helpers_unit_test.go index 63887385fc..7f4beb5431 100644 --- a/internal/controller/standalone_pgadmin/helpers_unit_test.go +++ b/internal/controller/standalone_pgadmin/helpers_unit_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/pod.go b/internal/controller/standalone_pgadmin/pod.go index bbb39b9322..5231771538 100644 --- a/internal/controller/standalone_pgadmin/pod.go +++ b/internal/controller/standalone_pgadmin/pod.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -155,13 +155,13 @@ func pod( }, } - // Creating a readiness probe that will check that the pgAdmin `/login` + // Creating a readiness probe that will check that the pgAdmin `/misc/ping` // endpoint is reachable at the specified port readinessProbe := &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ HTTPGet: &corev1.HTTPGetAction{ Port: intstr.FromInt32(pgAdminPort), - Path: "/login", + Path: "/misc/ping", Scheme: corev1.URISchemeHTTP, }, }, @@ -170,7 +170,7 @@ func pod( // Check the configmap to see if we think TLS is enabled // If so, update the readiness check scheme to HTTPS if strings.Contains(gunicornData, "certfile") && strings.Contains(gunicornData, "keyfile") { - readinessProbe.ProbeHandler.HTTPGet.Scheme = corev1.URISchemeHTTPS + readinessProbe.HTTPGet.Scheme = corev1.URISchemeHTTPS } container.ReadinessProbe = readinessProbe diff --git a/internal/controller/standalone_pgadmin/pod_test.go b/internal/controller/standalone_pgadmin/pod_test.go index 19cee52882..2395515c3e 100644 --- a/internal/controller/standalone_pgadmin/pod_test.go +++ b/internal/controller/standalone_pgadmin/pod_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -105,7 +105,7 @@ containers: protocol: TCP readinessProbe: httpGet: - path: /login + path: /misc/ping port: 5050 scheme: HTTP resources: {} @@ -292,7 +292,7 @@ containers: protocol: TCP readinessProbe: httpGet: - path: /login + path: /misc/ping port: 5050 scheme: HTTP resources: diff --git a/internal/controller/standalone_pgadmin/postgrescluster.go b/internal/controller/standalone_pgadmin/postgrescluster.go index 5327b8ae70..b9c6a55551 100644 --- a/internal/controller/standalone_pgadmin/postgrescluster.go +++ b/internal/controller/standalone_pgadmin/postgrescluster.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -7,12 +7,12 @@ package standalone_pgadmin import ( "context" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" - "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) //+kubebuilder:rbac:groups="postgres-operator.crunchydata.com",resources="pgadmins",verbs={list} diff --git a/internal/controller/standalone_pgadmin/service.go b/internal/controller/standalone_pgadmin/service.go index 2453a6a1fa..bfdc04c6ec 100644 --- a/internal/controller/standalone_pgadmin/service.go +++ b/internal/controller/standalone_pgadmin/service.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -7,16 +7,14 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" + apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/controller-runtime/pkg/client" - apierrors "k8s.io/apimachinery/pkg/api/errors" - - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -38,7 +36,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( // need to delete any existing service(s). At the start of every reconcile // get all services that match the current pgAdmin labels. 
services := corev1.ServiceList{} - if err := r.Client.List(ctx, &services, + if err := r.List(ctx, &services, client.InNamespace(pgadmin.Namespace), client.MatchingLabels{ naming.LabelStandalonePGAdmin: pgadmin.Name, @@ -64,7 +62,7 @@ func (r *PGAdminReconciler) reconcilePGAdminService( if pgadmin.Spec.ServiceName != "" { // Look for an existing service with name ServiceName in the namespace existingService := &corev1.Service{} - err := r.Client.Get(ctx, types.NamespacedName{ + err := r.Get(ctx, types.NamespacedName{ Name: pgadmin.Spec.ServiceName, Namespace: pgadmin.GetNamespace(), }, existingService) diff --git a/internal/controller/standalone_pgadmin/service_test.go b/internal/controller/standalone_pgadmin/service_test.go index 24b20c8247..a15c89d7ec 100644 --- a/internal/controller/standalone_pgadmin/service_test.go +++ b/internal/controller/standalone_pgadmin/service_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index e086e333f4..cf2cb1b89d 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -7,14 +7,13 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" @@ -33,7 +32,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( // When we delete the StatefulSet, we will leave its Pods in place. They will be claimed by // the StatefulSet that gets created in the next reconcile. existing := &appsv1.StatefulSet{} - if err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { + if err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(sts), existing)); err != nil { if !apierrors.IsNotFound(err) { return err } @@ -46,7 +45,7 @@ func (r *PGAdminReconciler) reconcilePGAdminStatefulSet( exactly := client.Preconditions{UID: &uid, ResourceVersion: &version} propagate := client.PropagationPolicy(metav1.DeletePropagationOrphan) - return errors.WithStack(client.IgnoreNotFound(r.Client.Delete(ctx, existing, exactly, propagate))) + return errors.WithStack(client.IgnoreNotFound(r.Delete(ctx, existing, exactly, propagate))) } } diff --git a/internal/controller/standalone_pgadmin/statefulset_test.go b/internal/controller/standalone_pgadmin/statefulset_test.go index 52c501b357..9b7e11e8d8 100644 --- a/internal/controller/standalone_pgadmin/statefulset_test.go +++ b/internal/controller/standalone_pgadmin/statefulset_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -35,6 +35,7 @@ func TestReconcilePGAdminStatefulSet(t *testing.T) { pgadmin := new(v1beta1.PGAdmin) pgadmin.Name = "test-standalone-pgadmin" pgadmin.Namespace = ns.Name + pgadmin.Spec.Image = initialize.String("some-image") assert.NilError(t, cc.Create(ctx, pgadmin)) t.Cleanup(func() { assert.Check(t, cc.Delete(ctx, pgadmin)) }) @@ -103,6 +104,7 @@ terminationGracePeriodSeconds: 30 // add pod level customizations custompgadmin.Name = "custom-pgadmin" custompgadmin.Namespace = ns.Name + custompgadmin.Spec.Image = initialize.String("some-image") // annotation and label custompgadmin.Spec.Metadata = &v1beta1.Metadata{ diff --git a/internal/controller/standalone_pgadmin/users.go b/internal/controller/standalone_pgadmin/users.go index 3c9a3ce05b..027960e90c 100644 --- a/internal/controller/standalone_pgadmin/users.go +++ b/internal/controller/standalone_pgadmin/users.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -53,7 +53,7 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * pod := &corev1.Pod{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} pod.Name += "-0" - err := errors.WithStack(r.Client.Get(ctx, client.ObjectKeyFromObject(pod), pod)) + err := errors.WithStack(r.Get(ctx, client.ObjectKeyFromObject(pod), pod)) if err != nil { return client.IgnoreNotFound(err) } @@ -79,28 +79,53 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return nil } - // If the pgAdmin version is not in the status or the image SHA has changed, get - // the pgAdmin version and store it in the status. - var pgadminVersion int - if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.ImageSHA != pgAdminImageSha { - pgadminVersion, err = r.reconcilePGAdminMajorVersion(ctx, podExecutor) + // If the pgAdmin major or minor version is not in the status or the image + // SHA has changed, get the pgAdmin version and store it in the status. + var pgadminMajorVersion int + if pgadmin.Status.MajorVersion == 0 || pgadmin.Status.MinorVersion == "" || + pgadmin.Status.ImageSHA != pgAdminImageSha { + + // exec into the pgAdmin pod and retrieve the pgAdmin minor version + script := fmt.Sprintf(` +PGADMIN_DIR=%s +cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_VERSION)" +`, pgAdminDir) + + var stdin, stdout, stderr bytes.Buffer + + if err := podExecutor(ctx, &stdin, &stdout, &stderr, + []string{"bash", "-ceu", "--", script}...); err != nil { + return err + } + + pgadminMinorVersion := strings.TrimSpace(stdout.String()) + + // ensure minor version is valid before storing in status + parsedMinorVersion, err := strconv.ParseFloat(pgadminMinorVersion, 64) if err != nil { return err } - pgadmin.Status.MajorVersion = pgadminVersion + + // Note: "When converting a floating-point number to an integer, the + // fraction is discarded (truncation towards zero)." + // - https://go.dev/ref/spec#Conversions + pgadminMajorVersion = int(parsedMinorVersion) + + pgadmin.Status.MinorVersion = pgadminMinorVersion + pgadmin.Status.MajorVersion = pgadminMajorVersion pgadmin.Status.ImageSHA = pgAdminImageSha } else { - pgadminVersion = pgadmin.Status.MajorVersion + pgadminMajorVersion = pgadmin.Status.MajorVersion } // If the pgAdmin version is not v8 or higher, return early as user management is // only supported for pgAdmin v8 and higher. 
- if pgadminVersion < 8 { + if pgadminMajorVersion < 8 { // If pgAdmin version is less than v8 and user management is being attempted, // log a message clarifying that it is only supported for pgAdmin v8 and higher. if len(pgadmin.Spec.Users) > 0 { log.Info("User management is only supported for pgAdmin v8 and higher.", - "pgadminVersion", pgadminVersion) + "pgadminVersion", pgadminMajorVersion) } return err } @@ -108,25 +133,6 @@ func (r *PGAdminReconciler) reconcilePGAdminUsers(ctx context.Context, pgadmin * return r.writePGAdminUsers(ctx, pgadmin, podExecutor) } -// reconcilePGAdminMajorVersion execs into the pgAdmin pod and retrieves the pgAdmin major version -func (r *PGAdminReconciler) reconcilePGAdminMajorVersion(ctx context.Context, exec Executor) (int, error) { - script := fmt.Sprintf(` -PGADMIN_DIR=%s -cd $PGADMIN_DIR && python3 -c "import config; print(config.APP_RELEASE)" -`, pgAdminDir) - - var stdin, stdout, stderr bytes.Buffer - - err := exec(ctx, &stdin, &stdout, &stderr, - []string{"bash", "-ceu", "--", script}...) - - if err != nil { - return 0, err - } - - return strconv.Atoi(strings.TrimSpace(stdout.String())) -} - // writePGAdminUsers takes the users in the pgAdmin spec and writes (adds or updates) their data // to both pgAdmin and the users.json file that is stored in the pgAdmin secret. If a user is // removed from the spec, its data is removed from users.json, but it is not deleted from pgAdmin. @@ -136,7 +142,7 @@ func (r *PGAdminReconciler) writePGAdminUsers(ctx context.Context, pgadmin *v1be existingUserSecret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) + r.Get(ctx, client.ObjectKeyFromObject(existingUserSecret), existingUserSecret)) if client.IgnoreNotFound(err) != nil { return err } @@ -170,10 +176,25 @@ cd $PGADMIN_DIR for _, user := range existingUsersArr { existingUsersMap[user.Username] = user } + + var olderThan9_3 bool + versionFloat, err := strconv.ParseFloat(pgadmin.Status.MinorVersion, 64) + if err != nil { + return err + } + if versionFloat < 9.3 { + olderThan9_3 = true + } + intentUsers := []pgAdminUserForJson{} for _, user := range pgadmin.Spec.Users { var stdin, stdout, stderr bytes.Buffer - typeFlag := "--nonadmin" + // starting in pgAdmin 9.3, custom roles are supported and a new flag is used + // - https://github.com/pgadmin-org/pgadmin4/pull/8631 + typeFlag := "--role User" + if olderThan9_3 { + typeFlag = "--nonadmin" + } isAdmin := false if user.Role == "Administrator" { typeFlag = "--admin" @@ -183,10 +204,10 @@ cd $PGADMIN_DIR // Get password from secret userPasswordSecret := &corev1.Secret{ObjectMeta: metav1.ObjectMeta{ Namespace: pgadmin.Namespace, - Name: user.PasswordRef.LocalObjectReference.Name, + Name: user.PasswordRef.Name, }} err := errors.WithStack( - r.Client.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) + r.Get(ctx, client.ObjectKeyFromObject(userPasswordSecret), userPasswordSecret)) if err != nil { log.Error(err, "Could not get user password secret") continue @@ -229,8 +250,13 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") intentUsers = append(intentUsers, existingUser) continue + + } else if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. 
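+ // Log the warning at info level and fall through: the update-user call itself is still treated as successful.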
+ log.Info(stderr.String()) } else if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py update-user error for %s: ", intentUser.Username)) intentUsers = append(intentUsers, existingUser) continue @@ -263,8 +289,12 @@ cd $PGADMIN_DIR log.Error(err, "PodExec failed: ") continue } - if strings.TrimSpace(stderr.String()) != "" { - log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py error for %s: ", + if strings.Contains(strings.TrimSpace(stderr.String()), "UserWarning: pkg_resources is deprecated as an API") { + // Started seeing this error with pgAdmin 9.7 when using Python 3.11. + // Issue appears to resolve with Python 3.13. + log.Info(stderr.String()) + } else if strings.TrimSpace(stderr.String()) != "" { + log.Error(errors.New(stderr.String()), fmt.Sprintf("pgAdmin setup.py add-user error for %s: ", intentUser.Username)) continue } @@ -293,11 +323,7 @@ cd $PGADMIN_DIR // to add a user, that user will not be in intentUsers. If errors occurred when attempting to // update a user, the user will be in intentUsers as it existed before. We now want to marshal the // intentUsers to json and write the users.json file to the secret. - usersJSON, err := json.Marshal(intentUsers) - if err != nil { - return err - } - intentUserSecret.Data["users.json"] = usersJSON + intentUserSecret.Data["users.json"], _ = json.Marshal(intentUsers) err = errors.WithStack(r.setControllerReference(pgadmin, intentUserSecret)) if err == nil { diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go index 409fcea701..57910b0926 100644 --- a/internal/controller/standalone_pgadmin/users_test.go +++ b/internal/controller/standalone_pgadmin/users_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -7,12 +7,12 @@ package standalone_pgadmin import ( "context" "encoding/json" + "errors" "fmt" "io" "strings" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -110,15 +110,16 @@ func TestReconcilePGAdminUsers(t *testing.T) { assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "fakeSHA") }) @@ -145,78 +146,89 @@ func TestReconcilePGAdminUsers(t *testing.T) { ) error { calls++ - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - stdout.Write([]byte("7")) + // Simulate a v7.1 version of pgAdmin by setting stdout to "7.1" + // for podexec call in reconcilePGAdminVersion + _, _ = stdout.Write([]byte("7.1")) return nil } assert.NilError(t, r.reconcilePGAdminUsers(ctx, pgadmin)) assert.Equal(t, calls, 1, "PodExec should be called once") assert.Equal(t, pgadmin.Status.MajorVersion, 7) + assert.Equal(t, pgadmin.Status.MinorVersion, "7.1") assert.Equal(t, pgadmin.Status.ImageSHA, "newFakeSHA") }) -} -func TestReconcilePGAdminMajorVersion(t *testing.T) { - ctx := context.Background() - pod := corev1.Pod{} - pod.Namespace = "test-namespace" - pod.Name = "pgadmin-123-0" - reconciler := &PGAdminReconciler{} + t.Run("PodHealthyBadVersion", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() - podExecutor := func( - ctx context.Context, stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - return reconciler.PodExec(ctx, pod.Namespace, pod.Name, "pgadmin", stdin, stdout, stderr, command...) 
- } + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" - t.Run("SuccessfulRetrieval", func(t *testing.T) { - reconciler.PodExec = func( + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + calls++ + assert.Equal(t, pod, "pgadmin-123-0") - assert.Equal(t, namespace, "test-namespace") + assert.Equal(t, namespace, pgadmin.Namespace) assert.Equal(t, container, naming.ContainerPGAdmin) - // Simulate a v7 version of pgAdmin by setting stdout to "7" for - // podexec call in reconcilePGAdminMajorVersion - stdout.Write([]byte("7")) + // set expected version to something completely wrong + _, _ = stdout.Write([]byte("woot")) return nil } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.NilError(t, err) - assert.Equal(t, version, 7) + assert.ErrorContains(t, r.reconcilePGAdminUsers(ctx, pgadmin), "strconv.ParseFloat: parsing \"woot\": invalid syntax") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") }) - t.Run("FailedRetrieval", func(t *testing.T) { - reconciler.PodExec = func( - ctx context.Context, namespace, pod, container string, - stdin io.Reader, stdout, stderr io.Writer, command ...string, - ) error { - // Simulate the python call giving bad data (not a version int) - stdout.Write([]byte("asdfjkl;")) - return nil - } + t.Run("PodExecError", func(t *testing.T) { + pgadmin := pgadmin.DeepCopy() + pod := pod.DeepCopy() - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, 0) - }) + pod.DeletionTimestamp = nil + pod.Status.ContainerStatuses = + []corev1.ContainerStatus{{Name: naming.ContainerPGAdmin}} + pod.Status.ContainerStatuses[0].State.Running = + new(corev1.ContainerStateRunning) + pod.Status.ContainerStatuses[0].ImageID = "fakeSHA" - t.Run("PodExecError", func(t *testing.T) { - reconciler.PodExec = func( + r := new(PGAdminReconciler) + r.Client = fake.NewClientBuilder().WithObjects(pod).Build() + + calls := 0 + r.PodExec = func( ctx context.Context, namespace, pod, container string, stdin io.Reader, stdout, stderr io.Writer, command ...string, ) error { + calls++ + + assert.Equal(t, pod, "pgadmin-123-0") + assert.Equal(t, namespace, pgadmin.Namespace) + assert.Equal(t, container, naming.ContainerPGAdmin) + return errors.New("PodExecError") } - version, err := reconciler.reconcilePGAdminMajorVersion(ctx, podExecutor) - assert.Check(t, err != nil) - assert.Equal(t, version, 0) + assert.Error(t, r.reconcilePGAdminUsers(ctx, pgadmin), "PodExecError") + assert.Equal(t, calls, 1, "PodExec should be called once") + assert.Equal(t, pgadmin.Status.MajorVersion, 0) + assert.Equal(t, pgadmin.Status.MinorVersion, "") + assert.Equal(t, pgadmin.Status.ImageSHA, "") }) } @@ -238,6 +250,14 @@ func TestWritePGAdminUsers(t *testing.T) { pgadmin.Namespace = ns.Name assert.NilError(t, cc.Create(ctx, pgadmin)) + // fake the status so that the correct commands will be used when creating + // users. 
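+ // MinorVersion "9.3" makes writePGAdminUsers choose the "--role User" flag rather than the pre-9.3 "--nonadmin" flag.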
+ pgadmin.Status = v1beta1.PGAdminStatus{ + ImageSHA: "fakesha", + MajorVersion: 9, + MinorVersion: "9.3", + } + userPasswordSecret1 := &corev1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: "user-password-secret1", @@ -310,8 +330,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 1, "PodExec should be called once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -370,8 +390,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -442,8 +462,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -487,8 +507,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 0, "PodExec should be called zero times") secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -529,8 +549,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -556,8 +576,8 @@ func TestWritePGAdminUsers(t *testing.T) { assert.Equal(t, calls, 2, "PodExec should be called once more") // User in users.json should be unchanged - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, 
json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -609,8 +629,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)} - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -637,8 +657,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -665,8 +685,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -694,8 +714,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) diff --git a/internal/controller/standalone_pgadmin/volume.go b/internal/controller/standalone_pgadmin/volume.go index 7615f6142b..b37c21ed9a 100644 --- a/internal/controller/standalone_pgadmin/volume.go +++ b/internal/controller/standalone_pgadmin/volume.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -7,14 +7,13 @@ package standalone_pgadmin import ( "context" + "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/validation/field" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index 645c228277..b0113cba64 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -6,6 +6,7 @@ package standalone_pgadmin import ( "context" + "errors" "testing" "gotest.tools/v3/assert" @@ -16,8 +17,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" diff --git a/internal/controller/standalone_pgadmin/watches.go b/internal/controller/standalone_pgadmin/watches.go index 49ac1ebd29..8991c71244 100644 --- a/internal/controller/standalone_pgadmin/watches.go +++ b/internal/controller/standalone_pgadmin/watches.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -91,7 +91,7 @@ func (r *PGAdminReconciler) findPGAdminsForSecret( }); err == nil { for i := range pgadmins.Items { for j := range pgadmins.Items[i].Spec.Users { - if pgadmins.Items[i].Spec.Users[j].PasswordRef.LocalObjectReference.Name == secret.Name { + if pgadmins.Items[i].Spec.Users[j].PasswordRef.Name == secret.Name { matching = append(matching, &pgadmins.Items[i]) break } diff --git a/internal/controller/standalone_pgadmin/watches_test.go b/internal/controller/standalone_pgadmin/watches_test.go index 1419eb9efa..649451add6 100644 --- a/internal/controller/standalone_pgadmin/watches_test.go +++ b/internal/controller/standalone_pgadmin/watches_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/feature/features.go b/internal/feature/features.go index db424ead42..472db878af 100644 --- a/internal/feature/features.go +++ b/internal/feature/features.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -42,6 +42,9 @@ package feature import ( "context" + "fmt" + "slices" + "strings" "k8s.io/component-base/featuregate" ) @@ -51,7 +54,6 @@ type Feature = featuregate.Feature // Gate indicates what features exist and which are enabled. type Gate interface { Enabled(Feature) bool - String() string } // MutableGate contains features that can be enabled or disabled. 
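The `watches.go` hunk above shortens `PasswordRef.LocalObjectReference.Name` to `PasswordRef.Name`. No behavior changes: `Name` lives on an embedded struct, and Go promotes embedded fields to the outer type. A self-contained sketch with trimmed stand-ins for the corev1 types:

```go
package main

import "fmt"

// LocalObjectReference mirrors corev1.LocalObjectReference.
type LocalObjectReference struct {
	Name string
}

// SecretKeySelector embeds LocalObjectReference the same way
// corev1.SecretKeySelector does, so Name is a promoted field.
type SecretKeySelector struct {
	LocalObjectReference
	Key string
}

func main() {
	ref := SecretKeySelector{
		LocalObjectReference: LocalObjectReference{Name: "user-password-secret1"},
		Key:                  "password",
	}
	// Both expressions read the same field; the short form relies on promotion.
	fmt.Println(ref.Name == ref.LocalObjectReference.Name) // true
}
```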
@@ -81,6 +83,9 @@ const ( // Support custom sidecars for pgBouncer Pods PGBouncerSidecars = "PGBouncerSidecars" + // Adjust PGUpgrade parallelism according to CPU resources + PGUpgradeCPUConcurrency = "PGUpgradeCPUConcurrency" + // Support tablespace volumes TablespaceVolumes = "TablespaceVolumes" @@ -93,14 +98,15 @@ func NewGate() MutableGate { gate := featuregate.NewFeatureGate() if err := gate.Add(map[Feature]featuregate.FeatureSpec{ - AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, - AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, - AutoGrowVolumes: {Default: false, PreRelease: featuregate.Alpha}, - BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, - InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, - PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, - TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, - VolumeSnapshots: {Default: false, PreRelease: featuregate.Alpha}, + AppendCustomQueries: {Default: false, PreRelease: featuregate.Alpha}, + AutoCreateUserSchema: {Default: true, PreRelease: featuregate.Beta}, + AutoGrowVolumes: {Default: false, PreRelease: featuregate.Alpha}, + BridgeIdentifiers: {Default: false, PreRelease: featuregate.Alpha}, + InstanceSidecars: {Default: false, PreRelease: featuregate.Alpha}, + PGBouncerSidecars: {Default: false, PreRelease: featuregate.Alpha}, + PGUpgradeCPUConcurrency: {Default: false, PreRelease: featuregate.Alpha}, + TablespaceVolumes: {Default: false, PreRelease: featuregate.Alpha}, + VolumeSnapshots: {Default: false, PreRelease: featuregate.Alpha}, }); err != nil { panic(err) } @@ -122,11 +128,36 @@ func NewContext(ctx context.Context, gate Gate) context.Context { return context.WithValue(ctx, contextKey{}, gate) } -func ShowGates(ctx context.Context) string { - featuresEnabled := "" - gate, ok := ctx.Value(contextKey{}).(Gate) - if ok { - featuresEnabled = gate.String() +// ShowEnabled returns all the features enabled in the Gate contained in ctx. +func ShowEnabled(ctx context.Context) string { + featuresEnabled := []string{} + if gate, ok := ctx.Value(contextKey{}).(interface { + Gate + GetAll() map[Feature]featuregate.FeatureSpec + }); ok { + specs := gate.GetAll() + for feature := range specs { + // `gate.Enabled` first checks if the feature is enabled; + // then (if not explicitly set by the user), + // it checks if the feature is on/true by default + if gate.Enabled(feature) { + featuresEnabled = append(featuresEnabled, fmt.Sprintf("%s=true", feature)) + } + } + } + slices.Sort(featuresEnabled) + return strings.Join(featuresEnabled, ",") +} + +// ShowAssigned returns the features enabled or disabled by Set and SetFromMap +// in the Gate contained in ctx. +func ShowAssigned(ctx context.Context) string { + featuresAssigned := "" + if gate, ok := ctx.Value(contextKey{}).(interface { + Gate + String() string + }); ok { + featuresAssigned = gate.String() } - return featuresEnabled + return featuresAssigned } diff --git a/internal/feature/features_test.go b/internal/feature/features_test.go index f76dd216e6..a70270e0b9 100644 --- a/internal/feature/features_test.go +++ b/internal/feature/features_test.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. 
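`ShowEnabled` above reaches `GetAll` by asserting the context value against an anonymous interface that combines `Gate` with the extra method, which keeps the exported `Gate` interface small. The same pattern in isolation (the `Gate` and gate implementation here are simplified for illustration):

```go
package main

import "fmt"

// Gate is a narrow interface, like the one exported by this package.
type Gate interface{ Enabled(feature string) bool }

// mapGate implements Gate plus an extra method that Gate does not expose.
type mapGate struct{ set map[string]bool }

func (g mapGate) Enabled(feature string) bool { return g.set[feature] }
func (g mapGate) GetAll() map[string]bool     { return g.set }

func printEnabled(v any) {
	// Assert both capabilities at once with an anonymous interface.
	// The caller never needs to know the concrete gate type.
	if gate, ok := v.(interface {
		Gate
		GetAll() map[string]bool
	}); ok {
		for feature := range gate.GetAll() {
			fmt.Printf("%s=%t\n", feature, gate.Enabled(feature))
		}
	}
}

func main() {
	printEnabled(mapGate{set: map[string]bool{"TablespaceVolumes": true}})
}
```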
// // SPDX-License-Identifier: Apache-2.0 @@ -6,6 +6,7 @@ package feature import ( "context" + "strings" "testing" "gotest.tools/v3/assert" @@ -21,10 +22,9 @@ func TestDefaults(t *testing.T) { assert.Assert(t, false == gate.Enabled(BridgeIdentifiers)) assert.Assert(t, false == gate.Enabled(InstanceSidecars)) assert.Assert(t, false == gate.Enabled(PGBouncerSidecars)) + assert.Assert(t, false == gate.Enabled(PGUpgradeCPUConcurrency)) assert.Assert(t, false == gate.Enabled(TablespaceVolumes)) assert.Assert(t, false == gate.Enabled(VolumeSnapshots)) - - assert.Equal(t, gate.String(), "") } func TestStringFormat(t *testing.T) { @@ -33,7 +33,6 @@ func TestStringFormat(t *testing.T) { assert.NilError(t, gate.Set("")) assert.NilError(t, gate.Set("TablespaceVolumes=true")) - assert.Equal(t, gate.String(), "TablespaceVolumes=true") assert.Assert(t, true == gate.Enabled(TablespaceVolumes)) err := gate.Set("NotAGate=true") @@ -53,13 +52,21 @@ func TestContext(t *testing.T) { t.Parallel() gate := NewGate() ctx := NewContext(context.Background(), gate) - assert.Equal(t, ShowGates(ctx), "") + + assert.Equal(t, ShowAssigned(ctx), "") + assert.Assert(t, ShowEnabled(ctx) != "") // This assumes some feature is enabled by default. assert.NilError(t, gate.Set("TablespaceVolumes=true")) - assert.Assert(t, true == Enabled(ctx, TablespaceVolumes)) - assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=true") + assert.Assert(t, Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowAssigned(ctx), "TablespaceVolumes=true") + assert.Assert(t, + strings.Contains(ShowEnabled(ctx), "TablespaceVolumes=true"), + "got: %v", ShowEnabled(ctx)) assert.NilError(t, gate.SetFromMap(map[string]bool{TablespaceVolumes: false})) - assert.Assert(t, false == Enabled(ctx, TablespaceVolumes)) - assert.Equal(t, ShowGates(ctx), "TablespaceVolumes=false") + assert.Assert(t, !Enabled(ctx, TablespaceVolumes)) + assert.Equal(t, ShowAssigned(ctx), "TablespaceVolumes=false") + assert.Assert(t, + !strings.Contains(ShowEnabled(ctx), "TablespaceVolumes"), + "got: %v", ShowEnabled(ctx)) } diff --git a/internal/initialize/doc.go b/internal/initialize/doc.go index aedd85846f..cd1d277e73 100644 --- a/internal/initialize/doc.go +++ b/internal/initialize/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/metadata.go b/internal/initialize/metadata.go index d62530736a..dac9ce306c 100644 --- a/internal/initialize/metadata.go +++ b/internal/initialize/metadata.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/metadata_test.go b/internal/initialize/metadata_test.go index 735e455a2e..8afbed4ad5 100644 --- a/internal/initialize/metadata_test.go +++ b/internal/initialize/metadata_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/primitives.go b/internal/initialize/primitives.go index 9bc264f88c..a451db0ead 100644 --- a/internal/initialize/primitives.go +++ b/internal/initialize/primitives.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/primitives_test.go b/internal/initialize/primitives_test.go index e39898b4fe..046c5b77e0 100644 --- a/internal/initialize/primitives_test.go +++ b/internal/initialize/primitives_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/security.go b/internal/initialize/security.go index 5dd52d7b1e..f7b451f502 100644 --- a/internal/initialize/security.go +++ b/internal/initialize/security.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/initialize/security_test.go b/internal/initialize/security_test.go index 0a6409cf41..6573143be6 100644 --- a/internal/initialize/security_test.go +++ b/internal/initialize/security_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/kubeapi/patch.go b/internal/kubeapi/patch.go index 973852c17a..95bcc9a6e1 100644 --- a/internal/kubeapi/patch.go +++ b/internal/kubeapi/patch.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -18,12 +18,10 @@ var escapeJSONPointer = strings.NewReplacer( "/", "~1", ).Replace -// JSON6902 represents a JSON Patch according to RFC 6902; the same as -// k8s.io/apimachinery/pkg/types.JSONPatchType. -type JSON6902 []interface{} +// JSON6902 represents a JSON Patch according to RFC 6902; the same as [types.JSONPatchType]. +type JSON6902 []any -// NewJSONPatch creates a new JSON Patch according to RFC 6902; the same as -// k8s.io/apimachinery/pkg/types.JSONPatchType. +// NewJSONPatch creates a new JSON Patch according to RFC 6902; the same as [types.JSONPatchType]. func NewJSONPatch() *JSON6902 { return &JSON6902{} } func (*JSON6902) pointer(tokens ...string) string { @@ -50,10 +48,10 @@ func (*JSON6902) pointer(tokens ...string) string { // > // > o If the target location specifies an object member that does exist, // > that member's value is replaced. -func (patch *JSON6902) Add(path ...string) func(value interface{}) *JSON6902 { +func (patch *JSON6902) Add(path ...string) func(value any) *JSON6902 { i := len(*patch) - f := func(value interface{}) *JSON6902 { - (*patch)[i] = map[string]interface{}{ + f := func(value any) *JSON6902 { + (*patch)[i] = map[string]any{ "op": "add", "path": patch.pointer(path...), "value": value, @@ -72,7 +70,7 @@ func (patch *JSON6902) Add(path ...string) func(value interface{}) *JSON6902 { // > // > The target location MUST exist for the operation to be successful. func (patch *JSON6902) Remove(path ...string) *JSON6902 { - *patch = append(*patch, map[string]interface{}{ + *patch = append(*patch, map[string]any{ "op": "remove", "path": patch.pointer(path...), }) @@ -86,10 +84,10 @@ func (patch *JSON6902) Remove(path ...string) *JSON6902 { // > with a new value. // > // > The target location MUST exist for the operation to be successful. 
-func (patch *JSON6902) Replace(path ...string) func(value interface{}) *JSON6902 { +func (patch *JSON6902) Replace(path ...string) func(value any) *JSON6902 { i := len(*patch) - f := func(value interface{}) *JSON6902 { - (*patch)[i] = map[string]interface{}{ + f := func(value any) *JSON6902 { + (*patch)[i] = map[string]any{ "op": "replace", "path": patch.pointer(path...), "value": value, @@ -103,23 +101,21 @@ func (patch *JSON6902) Replace(path ...string) func(value interface{}) *JSON6902 } // Bytes returns the JSON representation of patch. -func (patch JSON6902) Bytes() ([]byte, error) { return patch.Data(nil) } +func (patch *JSON6902) Bytes() ([]byte, error) { return patch.Data(nil) } // Data returns the JSON representation of patch. -func (patch JSON6902) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } +func (patch *JSON6902) Data(client.Object) ([]byte, error) { return json.Marshal(*patch) } // IsEmpty returns true when patch has no operations. -func (patch JSON6902) IsEmpty() bool { return len(patch) == 0 } +func (patch *JSON6902) IsEmpty() bool { return len(*patch) == 0 } -// Type returns k8s.io/apimachinery/pkg/types.JSONPatchType. -func (patch JSON6902) Type() types.PatchType { return types.JSONPatchType } +// Type returns [types.JSONPatchType]. +func (patch *JSON6902) Type() types.PatchType { return types.JSONPatchType } -// Merge7386 represents a JSON Merge Patch according to RFC 7386; the same as -// k8s.io/apimachinery/pkg/types.MergePatchType. -type Merge7386 map[string]interface{} +// Merge7386 represents a JSON Merge Patch according to RFC 7386; the same as [types.MergePatchType]. +type Merge7386 map[string]any -// NewMergePatch creates a new JSON Merge Patch according to RFC 7386; the same -// as k8s.io/apimachinery/pkg/types.MergePatchType. +// NewMergePatch creates a new JSON Merge Patch according to RFC 7386; the same as [types.MergePatchType]. func NewMergePatch() *Merge7386 { return &Merge7386{} } // Add modifies patch to indicate that the member at path should be added or @@ -130,7 +126,7 @@ func NewMergePatch() *Merge7386 { return &Merge7386{} } // > contain the member, the value is replaced. Null values in the merge // > patch are given special meaning to indicate the removal of existing // > values in the target. -func (patch *Merge7386) Add(path ...string) func(value interface{}) *Merge7386 { +func (patch *Merge7386) Add(path ...string) func(value any) *Merge7386 { position := *patch for len(path) > 1 { @@ -145,10 +141,10 @@ func (patch *Merge7386) Add(path ...string) func(value interface{}) *Merge7386 { } if len(path) < 1 { - return func(interface{}) *Merge7386 { return patch } + return func(any) *Merge7386 { return patch } } - f := func(value interface{}) *Merge7386 { + f := func(value any) *Merge7386 { position[path[0]] = value return patch } @@ -165,13 +161,13 @@ func (patch *Merge7386) Remove(path ...string) *Merge7386 { } // Bytes returns the JSON representation of patch. -func (patch Merge7386) Bytes() ([]byte, error) { return patch.Data(nil) } +func (patch *Merge7386) Bytes() ([]byte, error) { return patch.Data(nil) } // Data returns the JSON representation of patch. -func (patch Merge7386) Data(client.Object) ([]byte, error) { return json.Marshal(patch) } +func (patch *Merge7386) Data(client.Object) ([]byte, error) { return json.Marshal(*patch) } // IsEmpty returns true when patch has no modifications. 
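The `Add` and `Replace` builders above return a closure so the operation's value can be supplied right after its path, keeping call sites fluent. A trimmed, runnable stand-in that shows the JSON these patches marshal to (the real `JSON6902` also takes the path as variadic tokens and escapes them per RFC 6901):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// JSON6902 is a stand-in for the patch type above.
type JSON6902 []any

// Add reserves a slot for the operation, then lets the caller fill in
// the value: patch.Add(path)(value).
func (patch *JSON6902) Add(path string) func(any) *JSON6902 {
	i := len(*patch)
	*patch = append(*patch, nil)
	return func(value any) *JSON6902 {
		(*patch)[i] = map[string]any{"op": "add", "path": path, "value": value}
		return patch
	}
}

// Remove needs no value, so it returns the patch directly.
func (patch *JSON6902) Remove(path string) *JSON6902 {
	*patch = append(*patch, map[string]any{"op": "remove", "path": path})
	return patch
}

func main() {
	patch := &JSON6902{}
	patch.Add("/metadata/labels/app")("pgo").Remove("/metadata/annotations/stale")

	b, _ := json.Marshal(patch)
	fmt.Println(string(b))
	// [{"op":"add","path":"/metadata/labels/app","value":"pgo"},
	//  {"op":"remove","path":"/metadata/annotations/stale"}]
}
```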
-func (patch Merge7386) IsEmpty() bool { return len(patch) == 0 } +func (patch *Merge7386) IsEmpty() bool { return len(*patch) == 0 } -// Type returns k8s.io/apimachinery/pkg/types.MergePatchType. -func (patch Merge7386) Type() types.PatchType { return types.MergePatchType } +// Type returns [types.MergePatchType]. +func (patch *Merge7386) Type() types.PatchType { return types.MergePatchType } diff --git a/internal/kubeapi/patch_test.go b/internal/kubeapi/patch_test.go index 52f5787b8f..91f6bdebd8 100644 --- a/internal/kubeapi/patch_test.go +++ b/internal/kubeapi/patch_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/logging/logr.go b/internal/logging/logr.go index c907997d40..74c66c87b0 100644 --- a/internal/logging/logr.go +++ b/internal/logging/logr.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -51,12 +51,12 @@ type sink struct { depth int verbosity int names []string - values []interface{} + values []any // TODO(cbandy): add names or frame to the functions below. - fnError func(error, string, ...interface{}) - fnInfo func(int, string, ...interface{}) + fnError func(error, string, ...any) + fnInfo func(int, string, ...any) } var _ logr.LogSink = (*sink)(nil) @@ -64,7 +64,7 @@ var _ logr.LogSink = (*sink)(nil) func (s *sink) Enabled(level int) bool { return level <= s.verbosity } func (s *sink) Init(info logr.RuntimeInfo) { s.depth = info.CallDepth } -func (s sink) combineValues(kv ...interface{}) []interface{} { +func (s *sink) combineValues(kv ...any) []any { if len(kv) == 0 { return s.values } @@ -74,11 +74,11 @@ func (s sink) combineValues(kv ...interface{}) []interface{} { return kv } -func (s *sink) Error(err error, msg string, kv ...interface{}) { +func (s *sink) Error(err error, msg string, kv ...any) { s.fnError(err, msg, s.combineValues(kv...)...) } -func (s *sink) Info(level int, msg string, kv ...interface{}) { +func (s *sink) Info(level int, msg string, kv ...any) { s.fnInfo(level, msg, s.combineValues(kv...)...) } @@ -89,7 +89,7 @@ func (s *sink) WithName(name string) logr.LogSink { return &out } -func (s *sink) WithValues(kv ...interface{}) logr.LogSink { +func (s *sink) WithValues(kv ...any) logr.LogSink { n := len(s.values) out := *s out.values = append(out.values[:n:n], kv...) diff --git a/internal/logging/logr_test.go b/internal/logging/logr_test.go index 1cbc818ad9..c4e0abf38c 100644 --- a/internal/logging/logr_test.go +++ b/internal/logging/logr_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/logging/logrus.go b/internal/logging/logrus.go index 9683a104d1..19ca3e2aa3 100644 --- a/internal/logging/logrus.go +++ b/internal/logging/logrus.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index 3e73193d1a..d74a3a0e01 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
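In the logging sink above, `WithValues` appends to `out.values[:n:n]`, a full slice expression that caps capacity at the current length. That cap forces `append` to copy, so a derived logger can never write into its parent's backing array. The trick in isolation:

```go
package main

import "fmt"

func main() {
	// base has spare capacity, the dangerous case for a plain append.
	base := make([]any, 0, 4)
	base = append(base, "cluster", "hippo")

	// s[:n:n] caps capacity at n, so each append below allocates a copy.
	a := append(base[:2:2], "job", "backup")
	b := append(base[:2:2], "job", "restore")

	// Without the cap, the second append would have overwritten the first.
	fmt.Println(a) // [cluster hippo job backup]
	fmt.Println(b) // [cluster hippo job restore]
}
```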
// // SPDX-License-Identifier: Apache-2.0 @@ -12,7 +12,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" + "github.com/pkg/errors" //nolint:depguard // This is testing the logging of stack frames. "gotest.tools/v3/assert" ) diff --git a/internal/naming/annotations.go b/internal/naming/annotations.go index 2179a5f084..0721fede31 100644 --- a/internal/naming/annotations.go +++ b/internal/naming/annotations.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -32,6 +32,14 @@ const ( // (and therefore must be recreated) PGBackRestConfigHash = annotationPrefix + "pgbackrest-hash" + // PGBackRestCurrentConfig is an annotation used to indicate the name of the pgBackRest + // configuration associated with a specific Job as determined by either the current primary + // (if no dedicated repository host is enabled), or the dedicated repository host. This helps + // in detecting pgBackRest backup Jobs that no longer mount the proper pgBackRest + // configuration, e.g. because a failover has occurred, or because a dedicated repo host has been + // enabled or disabled. + PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config" + // PGBackRestRestore is the annotation that is added to a PostgresCluster to initiate an in-place // restore. The value of the annotation will be a unique identifier for a restore Job (e.g. a // timestamp), which will be stored in the PostgresCluster status to properly track completion @@ -46,6 +54,10 @@ const ( // bind all addresses does not work in certain IPv6 environments. PGBackRestIPVersion = annotationPrefix + "pgbackrest-ip-version" + // PGBackRestCloudLogVolume is an annotation used to indicate which persistent volume claim + // should be mounted to cloud repo backup jobs so that the backup logs can be persisted. + PGBackRestCloudLogVolume = annotationPrefix + "pgbackrest-cloud-log-volume" + // PostgresExporterCollectorsAnnotation is an annotation used to allow users to control whether or // not postgres_exporter default metrics, settings, and collectors are enabled. The value "None" // disables all postgres_exporter defaults. Disabling the defaults may cause errors in dashboards. diff --git a/internal/naming/annotations_test.go b/internal/naming/annotations_test.go index 318dd5ab5c..9553e5e72a 100644 --- a/internal/naming/annotations_test.go +++ b/internal/naming/annotations_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
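`PGBackRestCurrentConfig` lets the reconciler compare the configuration a backup Job mounted when it was created against the one it should mount now. A sketch of that comparison; `jobNeedsRecreate` is a hypothetical helper, not part of this change, while the annotation key matches the constant above:

```go
package main

import "fmt"

const annotationPrefix = "postgres-operator.crunchydata.com/"
const PGBackRestCurrentConfig = annotationPrefix + "pgbackrest-config"

// jobNeedsRecreate reports whether a backup Job was created against a
// configuration that is no longer the right one, for example after a
// failover or after a dedicated repo host is enabled or disabled.
func jobNeedsRecreate(jobAnnotations map[string]string, currentConfig string) bool {
	return jobAnnotations[PGBackRestCurrentConfig] != currentConfig
}

func main() {
	annotations := map[string]string{PGBackRestCurrentConfig: "pgbackrest_repo.conf"}
	// After the dedicated repo host goes away, the expected file changes.
	fmt.Println(jobNeedsRecreate(annotations, "pgbackrest_primary.conf")) // true
}
```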
// // SPDX-License-Identifier: Apache-2.0 @@ -20,7 +20,9 @@ func TestAnnotationsValid(t *testing.T) { assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackup)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestBackupJobCompletion)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestConfigHash)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCurrentConfig)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestIPVersion)) + assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestCloudLogVolume)) assert.Assert(t, nil == validation.IsQualifiedName(PGBackRestRestore)) assert.Assert(t, nil == validation.IsQualifiedName(PostgresExporterCollectorsAnnotation)) } diff --git a/internal/naming/controllers.go b/internal/naming/controllers.go index 3d492e8a3a..b434b8dbc5 100644 --- a/internal/naming/controllers.go +++ b/internal/naming/controllers.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/dns.go b/internal/naming/dns.go index d3351a5d70..0c6f68df0e 100644 --- a/internal/naming/dns.go +++ b/internal/naming/dns.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/dns_test.go b/internal/naming/dns_test.go index e7e2ea9dc6..a1ff726389 100644 --- a/internal/naming/dns_test.go +++ b/internal/naming/dns_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/doc.go b/internal/naming/doc.go index 72cab8b0b0..c292436460 100644 --- a/internal/naming/doc.go +++ b/internal/naming/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/labels.go b/internal/naming/labels.go index f25993122b..96724fda8b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/labels_test.go b/internal/naming/labels_test.go index b8a7779858..552e38ceb5 100644 --- a/internal/naming/labels_test.go +++ b/internal/naming/labels_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/limitations.md b/internal/naming/limitations.md index ba607215f7..1f25d1db3f 100644 --- a/internal/naming/limitations.md +++ b/internal/naming/limitations.md @@ -1,5 +1,5 @@ diff --git a/internal/naming/names.go b/internal/naming/names.go index 369591de91..849391b5de 100644 --- a/internal/naming/names.go +++ b/internal/naming/names.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -486,6 +486,15 @@ func PGBackRestRBAC(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { } } +// RepoHostRBAC returns the ObjectMeta necessary to lookup the ServiceAccount for +// the pgBackRest Repo Host +func RepoHostRBAC(cluster *v1beta1.PostgresCluster) metav1.ObjectMeta { + return metav1.ObjectMeta{ + Namespace: cluster.Namespace, + Name: cluster.Name + "-repohost", + } +} + // PGBackRestRepoVolume returns the ObjectMeta for a pgBackRest repository volume func PGBackRestRepoVolume(cluster *v1beta1.PostgresCluster, repoName string) metav1.ObjectMeta { @@ -583,11 +592,3 @@ func StandalonePGAdmin(pgadmin *v1beta1.PGAdmin) metav1.ObjectMeta { Name: fmt.Sprintf("pgadmin-%s", pgadmin.UID), } } - -// UpgradeCheckConfigMap returns the ObjectMeta for the PGO ConfigMap -func UpgradeCheckConfigMap() metav1.ObjectMeta { - return metav1.ObjectMeta{ - Namespace: config.PGONamespace(), - Name: "pgo-upgrade-check", - } -} diff --git a/internal/naming/names_test.go b/internal/naming/names_test.go index 27835c3e5d..cc8d07d113 100644 --- a/internal/naming/names_test.go +++ b/internal/naming/names_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/naming/selectors.go b/internal/naming/selectors.go index 94dbc3a9fa..c51f2d0262 100644 --- a/internal/naming/selectors.go +++ b/internal/naming/selectors.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -152,6 +152,13 @@ func ClusterPostgresUsers(cluster string) metav1.LabelSelector { } } +// ClusterPrimary selects things for the Primary PostgreSQL instance. +func ClusterPrimary(cluster string) metav1.LabelSelector { + s := ClusterInstances(cluster) + s.MatchLabels[LabelRole] = RolePatroniLeader + return s +} + // CrunchyBridgeClusterPostgresRoles selects things labeled for CrunchyBridgeCluster // PostgreSQL roles in cluster. func CrunchyBridgeClusterPostgresRoles(clusterName string) metav1.LabelSelector { diff --git a/internal/naming/selectors_test.go b/internal/naming/selectors_test.go index 1f5f42ad96..c8617bcb78 100644 --- a/internal/naming/selectors_test.go +++ b/internal/naming/selectors_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -148,6 +148,16 @@ func TestClusterPostgresUsers(t *testing.T) { assert.ErrorContains(t, err, "Invalid") } +func TestClusterPrimary(t *testing.T) { + s, err := AsSelector(ClusterPrimary("something")) + assert.NilError(t, err) + assert.DeepEqual(t, s.String(), strings.Join([]string{ + "postgres-operator.crunchydata.com/cluster=something", + "postgres-operator.crunchydata.com/instance", + "postgres-operator.crunchydata.com/role=master", + }, ",")) +} + func TestCrunchyBridgeClusterPostgresRoles(t *testing.T) { s, err := AsSelector(CrunchyBridgeClusterPostgresRoles("something")) assert.NilError(t, err) diff --git a/internal/naming/telemetry.go b/internal/naming/telemetry.go index 5825d6299f..8ac52266f0 100644 --- a/internal/naming/telemetry.go +++ b/internal/naming/telemetry.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
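`ClusterPrimary` above copies the cluster's instance selector and pins the role label to `RolePatroniLeader`, which remains the string `master` while the operator bridges Patroni v4 (see the `leader_label_value` settings later in this diff). A standalone sketch that produces the selector string the new test expects, using the apimachinery helper rather than this package's `AsSelector`:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	selector := metav1.LabelSelector{
		MatchLabels: map[string]string{
			"postgres-operator.crunchydata.com/cluster": "something",
			"postgres-operator.crunchydata.com/role":    "master",
		},
		// Instances carry the label with any value, so Exists is enough.
		MatchExpressions: []metav1.LabelSelectorRequirement{{
			Key:      "postgres-operator.crunchydata.com/instance",
			Operator: metav1.LabelSelectorOpExists,
		}},
	}

	s, err := metav1.LabelSelectorAsSelector(&selector)
	if err != nil {
		panic(err)
	}
	// Requirements come out sorted by key:
	// postgres-operator.crunchydata.com/cluster=something,
	// postgres-operator.crunchydata.com/instance,
	// postgres-operator.crunchydata.com/role=master
	fmt.Println(s.String())
}
```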
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/api.go b/internal/patroni/api.go index 679da5f4af..502a354d43 100644 --- a/internal/patroni/api.go +++ b/internal/patroni/api.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -45,7 +45,7 @@ func (exec Executor) ChangePrimaryAndWait( err := exec(ctx, nil, &stdout, &stderr, "patronictl", "switchover", "--scheduled=now", "--force", - "--master="+current, "--candidate="+next) + "--primary="+current, "--candidate="+next) log := logging.FromContext(ctx) log.V(1).Info("changed primary", @@ -65,7 +65,7 @@ func (exec Executor) ChangePrimaryAndWait( // "patronictl". It returns true when an election completes successfully. It // waits up to two "loop_wait" or until an error occurs. When Patroni is paused, // next cannot be blank. Similar to the "POST /switchover" REST endpoint. -// The "patronictl switchover" variant does not require the current master to be passed +// The "patronictl switchover" variant does not require the current primary to be passed // as a flag. func (exec Executor) SwitchoverAndWait( ctx context.Context, target string, @@ -96,7 +96,7 @@ func (exec Executor) SwitchoverAndWait( // "patronictl". It returns true when an election completes successfully. It // waits up to two "loop_wait" or until an error occurs. When Patroni is paused, // next cannot be blank. Similar to the "POST /switchover" REST endpoint. -// The "patronictl failover" variant does not require the current master to be passed +// The "patronictl failover" variant does not require the current primary to be passed // as a flag. func (exec Executor) FailoverAndWait( ctx context.Context, target string, diff --git a/internal/patroni/api_test.go b/internal/patroni/api_test.go index 1603d2fc75..513c612154 100644 --- a/internal/patroni/api_test.go +++ b/internal/patroni/api_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -36,7 +36,7 @@ func TestExecutorChangePrimaryAndWait(t *testing.T) { ) error { called = true assert.DeepEqual(t, command, strings.Fields( - `patronictl switchover --scheduled=now --force --master=old --candidate=new`, + `patronictl switchover --scheduled=now --force --primary=old --candidate=new`, )) assert.Assert(t, stdin == nil, "expected no stdin, got %T", stdin) assert.Assert(t, stderr != nil, "should capture stderr") diff --git a/internal/patroni/certificates.go b/internal/patroni/certificates.go index 9aa1525769..45db4fa2f7 100644 --- a/internal/patroni/certificates.go +++ b/internal/patroni/certificates.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/certificates.md b/internal/patroni/certificates.md index f58786ce20..14739030ef 100644 --- a/internal/patroni/certificates.md +++ b/internal/patroni/certificates.md @@ -1,5 +1,5 @@ diff --git a/internal/patroni/certificates_test.go b/internal/patroni/certificates_test.go index 3073f2247f..5c91f88691 100644 --- a/internal/patroni/certificates_test.go +++ b/internal/patroni/certificates_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
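The `--master=` to `--primary=` rename above tracks Patroni's move away from "master" terminology. For reference, the argument list `ChangePrimaryAndWait` now assembles, reduced to a sketch:

```go
package main

import "fmt"

// switchoverArgs mirrors the patronictl invocation above.
func switchoverArgs(current, next string) []string {
	return []string{
		"patronictl", "switchover", "--scheduled=now", "--force",
		"--primary=" + current, "--candidate=" + next,
	}
}

func main() {
	fmt.Println(switchoverArgs("old", "new"))
}
```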
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/config.go b/internal/patroni/config.go index b4d7e54f68..c9487dfa52 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -60,6 +60,11 @@ func clusterYAML( "role_label": naming.LabelRole, "scope_label": naming.LabelPatroni, "use_endpoints": true, + // To support transitioning to Patroni v4, set the value to 'master'. + // In a future release, this can be removed in favor of the default. + // Do this for leaders in both primary and standby clusters. + "leader_label_value": naming.RolePatroniLeader, + "standby_leader_label_value": naming.RolePatroniLeader, // In addition to "scope_label" above, Patroni will add the following to // every object it creates. It will also use these as filters when doing @@ -488,7 +493,7 @@ func instanceYAML( // created. That value should be injected using the downward API and the // PATRONI_KUBERNETES_POD_IP environment variable. - // Missing here is "ports" which is is connascent with "postgresql.connect_address". + // Missing here is "ports" which is connascent with "postgresql.connect_address". // See the PATRONI_KUBERNETES_PORTS env variable. }, @@ -565,7 +570,7 @@ func instanceYAML( postgresql[pgBackRestCreateReplicaMethod] = map[string]any{ "command": strings.Join(quoted, " "), "keep_data": true, - "no_master": true, + "no_leader": true, "no_params": true, } methods = append([]string{pgBackRestCreateReplicaMethod}, methods...) diff --git a/internal/patroni/config.md b/internal/patroni/config.md index 18d28d8a4e..ffd091601c 100644 --- a/internal/patroni/config.md +++ b/internal/patroni/config.md @@ -1,5 +1,5 @@ @@ -214,7 +214,7 @@ acquiring the leader lock, the Patroni leader: | - | postgresql.basebackup | Yes | mutable | either | List of arguments to pass to pg_basebackup when using the `basebackup` replica method. | - | postgresql.{method}.command | Yes¹ | mutable | either | Command to execute for this replica method. | - | postgresql.{method}.keep_data | Yes¹ | mutable | either | Whether or not Patroni should empty the data directory before. (default: false) -| - | postgresql.{method}.no_master | Yes¹ | mutable | either | Whether or not Patroni can call this method when no instances are running. (default: false) +| - | postgresql.{method}.no_leader | Yes¹ | mutable | either | Whether or not Patroni can call this method when no instances are running. (default: false) | - | postgresql.{method}.no_params | Yes¹ | mutable | either | Whether or not Patroni should pass extra arguments to the command. (default: false) || |||||| https://github.com/zalando/patroni/blob/v2.0.1/docs/replica_bootstrap.rst#bootstrap diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index 1fa51a81ae..9187dc3572 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
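The `leader_label_value` and `standby_leader_label_value` entries above keep Patroni v4 writing `master` as the role label value, so selectors and tooling that match the v3 label keep working until the default can be adopted. A sketch of the resulting section, rendered with `sigs.k8s.io/yaml` (only the keys added or shown in `clusterYAML`; the full map has more entries):

```go
package main

import (
	"fmt"

	"sigs.k8s.io/yaml"
)

func main() {
	kubernetes := map[string]any{
		"role_label":                 "postgres-operator.crunchydata.com/role",
		"scope_label":                "postgres-operator.crunchydata.com/patroni",
		"use_endpoints":              true,
		"leader_label_value":         "master",
		"standby_leader_label_value": "master",
	}
	out, err := yaml.Marshal(map[string]any{"kubernetes": kubernetes})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
}
```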
// // SPDX-License-Identifier: Apache-2.0 @@ -54,9 +54,11 @@ ctl: kubernetes: labels: postgres-operator.crunchydata.com/cluster: cluster-name + leader_label_value: master namespace: some-namespace role_label: postgres-operator.crunchydata.com/role scope_label: postgres-operator.crunchydata.com/patroni + standby_leader_label_value: master use_endpoints: true postgresql: authentication: @@ -112,9 +114,11 @@ ctl: kubernetes: labels: postgres-operator.crunchydata.com/cluster: cluster-name + leader_label_value: master namespace: some-namespace role_label: postgres-operator.crunchydata.com/role scope_label: postgres-operator.crunchydata.com/patroni + standby_leader_label_value: master use_endpoints: true postgresql: authentication: @@ -462,8 +466,8 @@ func TestDynamicConfiguration(t *testing.T) { }, }, hbas: postgres.HBAs{ - Default: []postgres.HostBasedAuthentication{ - *postgres.NewHBA().Local().Method("peer"), + Default: []*postgres.HostBasedAuthentication{ + postgres.NewHBA().Local().Method("peer"), }, }, expected: map[string]any{ @@ -487,8 +491,8 @@ func TestDynamicConfiguration(t *testing.T) { }, }, hbas: postgres.HBAs{ - Default: []postgres.HostBasedAuthentication{ - *postgres.NewHBA().Local().Method("peer"), + Default: []*postgres.HostBasedAuthentication{ + postgres.NewHBA().Local().Method("peer"), }, }, expected: map[string]any{ @@ -512,8 +516,8 @@ func TestDynamicConfiguration(t *testing.T) { }, }, hbas: postgres.HBAs{ - Mandatory: []postgres.HostBasedAuthentication{ - *postgres.NewHBA().Local().Method("peer"), + Mandatory: []*postgres.HostBasedAuthentication{ + postgres.NewHBA().Local().Method("peer"), }, }, expected: map[string]any{ @@ -538,8 +542,8 @@ func TestDynamicConfiguration(t *testing.T) { }, }, hbas: postgres.HBAs{ - Mandatory: []postgres.HostBasedAuthentication{ - *postgres.NewHBA().Local().Method("peer"), + Mandatory: []*postgres.HostBasedAuthentication{ + postgres.NewHBA().Local().Method("peer"), }, }, expected: map[string]any{ @@ -924,7 +928,7 @@ postgresql: command: '''bash'' ''-ceu'' ''--'' ''install --directory --mode=0700 "${PGDATA?}" && exec "$@"'' ''-'' ''some'' ''backrest'' ''cmd''' keep_data: true - no_master: true + no_leader: true no_params: true pgpass: /tmp/.pgpass use_unix_socket: true @@ -995,7 +999,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { file := filepath.Join(dir, "command.sh") assert.NilError(t, os.WriteFile(file, []byte(command), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", "--shell=sh", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", "--shell=sh", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -1017,7 +1021,7 @@ func TestPGBackRestCreateReplicaCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -1096,7 +1100,6 @@ func TestProbeTiming(t *testing.T) { FailureThreshold: 1, }}, } { - tt := tt actual := probeTiming(&v1beta1.PatroniSpec{ LeaderLeaseDurationSeconds: &tt.lease, SyncPeriodSeconds: &tt.sync, diff --git a/internal/patroni/doc.go b/internal/patroni/doc.go index 500305406d..035cdee7fe 100644 --- a/internal/patroni/doc.go +++ b/internal/patroni/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/rbac.go b/internal/patroni/rbac.go index f1e55b1137..18778c3504 100644 --- a/internal/patroni/rbac.go +++ b/internal/patroni/rbac.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/rbac_test.go b/internal/patroni/rbac_test.go index 39a8dff245..587d97bea6 100644 --- a/internal/patroni/rbac_test.go +++ b/internal/patroni/rbac_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index 4fbb08b67d..706ec264fb 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -171,8 +171,7 @@ func instanceProbes(cluster *v1beta1.PostgresCluster, container *corev1.Containe } // PodIsPrimary returns whether or not pod is currently acting as the leader with -// the "master" role. This role will be called "primary" in the future, see: -// - https://github.com/zalando/patroni/blob/master/docs/releases.rst?plain=1#L213 +// the "primary" role. func PodIsPrimary(pod metav1.Object) bool { if pod == nil { return false @@ -185,7 +184,8 @@ func PodIsPrimary(pod metav1.Object) bool { // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L782 // - https://github.com/zalando/patroni/blob/v3.1.1/patroni/ha.py#L1574 status := pod.GetAnnotations()["status"] - return strings.Contains(status, `"role":"master"`) + return strings.Contains(status, `"role":"master"`) || + strings.Contains(status, `"role":"primary"`) } // PodIsStandbyLeader returns whether or not pod is currently acting as a "standby_leader". diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index 5d2a2c0ad5..d0f280c3a6 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -241,7 +241,7 @@ func TestPodIsPrimary(t *testing.T) { assert.Assert(t, !PodIsPrimary(pod)) // Primary - pod.Annotations["status"] = `{"role":"master"}` + pod.Annotations["status"] = `{"role":"primary"}` assert.Assert(t, PodIsPrimary(pod)) } @@ -258,7 +258,7 @@ func TestPodIsStandbyLeader(t *testing.T) { assert.Assert(t, !PodIsStandbyLeader(pod)) // Leader - pod.Annotations["status"] = `{"role":"master"}` + pod.Annotations["status"] = `{"role":"primary"}` assert.Assert(t, !PodIsStandbyLeader(pod)) // Replica diff --git a/internal/pgadmin/config.go b/internal/pgadmin/config.go index 553a90f656..2dbe3a2e49 100644 --- a/internal/pgadmin/config.go +++ b/internal/pgadmin/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -160,7 +160,7 @@ if os.path.isfile('` + ldapPasswordAbsolutePath + `'): // systemSettings returns pgAdmin settings as a value that can be marshaled to JSON. 
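During a rolling move to Patroni v4, Pods may report either role spelling in their `status` annotation, which is why `PodIsPrimary` above now accepts both. The check in isolation (`podIsPrimary` is a simplified stand-in for the exported function):

```go
package main

import (
	"fmt"
	"strings"
)

// podIsPrimary accepts the Patroni v3 spelling ("master") and the
// v4 spelling ("primary") of the leader role.
func podIsPrimary(annotations map[string]string) bool {
	status := annotations["status"]
	return strings.Contains(status, `"role":"master"`) ||
		strings.Contains(status, `"role":"primary"`)
}

func main() {
	fmt.Println(podIsPrimary(map[string]string{"status": `{"role":"primary"}`})) // true
	fmt.Println(podIsPrimary(map[string]string{"status": `{"role":"master"}`}))  // true
	fmt.Println(podIsPrimary(map[string]string{"status": `{"role":"replica"}`})) // false
}
```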
func systemSettings(spec *v1beta1.PGAdminPodSpec) map[string]interface{} { - settings := *spec.Config.Settings.DeepCopy() + settings := spec.Config.Settings.DeepCopy() if settings == nil { settings = make(map[string]interface{}) } diff --git a/internal/pgadmin/config_test.go b/internal/pgadmin/config_test.go index 87cd7847c2..0e659c7070 100644 --- a/internal/pgadmin/config_test.go +++ b/internal/pgadmin/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -77,7 +77,7 @@ func TestStartupCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) }) @@ -94,7 +94,7 @@ func TestStartupCommand(t *testing.T) { // Expect flake8 to be happy. Ignore "E401 multiple imports on one line" // in addition to the defaults. The file contents appear in PodSpec, so // allow lines longer than the default to save some vertical space. - cmd := exec.Command(flake8, "--extend-ignore=E401", "--max-line-length=99", file) + cmd := exec.CommandContext(t.Context(), flake8, "--extend-ignore=E401", "--max-line-length=99", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) }) diff --git a/internal/pgadmin/reconcile.go b/internal/pgadmin/reconcile.go index af62c482f2..cefb179dc2 100644 --- a/internal/pgadmin/reconcile.go +++ b/internal/pgadmin/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgadmin/reconcile_test.go b/internal/pgadmin/reconcile_test.go index f91a9b807f..fcbdf589e3 100644 --- a/internal/pgadmin/reconcile_test.go +++ b/internal/pgadmin/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgadmin/users.go b/internal/pgadmin/users.go index 7ce69ce211..6c93fcd5d2 100644 --- a/internal/pgadmin/users.go +++ b/internal/pgadmin/users.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgadmin/users_test.go b/internal/pgadmin/users_test.go index 69619667af..18bfd1efd3 100644 --- a/internal/pgadmin/users_test.go +++ b/internal/pgadmin/users_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -180,7 +180,7 @@ with create_app().app_context(): // Expect flake8 to be happy. Ignore "E402 module level import not // at top of file" in addition to the defaults. 
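The test changes in these files replace `exec.Command` with `exec.CommandContext` driven by `t.Context()`, the per-test context added in Go 1.24 that is canceled when the test finishes, so a hung linter subprocess cannot outlive its test. The pattern reduced to a sketch (`true` stands in for shellcheck or flake8):

```go
package sketch

import (
	"os/exec"
	"testing"
)

func TestLinter(t *testing.T) {
	// t.Context() is canceled when this test completes, so the child
	// process is killed if it is still running at that point.
	cmd := exec.CommandContext(t.Context(), "true")
	if output, err := cmd.CombinedOutput(); err != nil {
		t.Fatalf("%q\n%s", cmd.Args, output)
	}
}
```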
- cmd := exec.Command(flake8, "--extend-ignore=E402", file) + cmd := exec.CommandContext(ctx, flake8, "--extend-ignore=E402", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/pgaudit/postgres.go b/internal/pgaudit/postgres.go index 07867d020e..27a0ffd720 100644 --- a/internal/pgaudit/postgres.go +++ b/internal/pgaudit/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -35,7 +35,9 @@ func EnableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { stdout, stderr, err := exec.ExecInAllDatabases(ctx, // Quiet the NOTICE from IF EXISTS, and install the pgAudit event triggers. + // Use the default setting for "synchronous_commit". // - https://www.postgresql.org/docs/current/runtime-config-client.html + // - https://www.postgresql.org/docs/current/runtime-config-wal.html // - https://github.com/pgaudit/pgaudit#settings `SET client_min_messages = WARNING; CREATE EXTENSION IF NOT EXISTS pgaudit;`, map[string]string{ diff --git a/internal/pgaudit/postgres_test.go b/internal/pgaudit/postgres_test.go index 3734e511f0..62854793ea 100644 --- a/internal/pgaudit/postgres_test.go +++ b/internal/pgaudit/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/certificates.go b/internal/pgbackrest/certificates.go index bb2633dfe7..88262a3074 100644 --- a/internal/pgbackrest/certificates.go +++ b/internal/pgbackrest/certificates.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/certificates.md b/internal/pgbackrest/certificates.md index 344616486b..898cf512cd 100644 --- a/internal/pgbackrest/certificates.md +++ b/internal/pgbackrest/certificates.md @@ -1,5 +1,5 @@ diff --git a/internal/pgbackrest/certificates_test.go b/internal/pgbackrest/certificates_test.go index 4ef41b2879..3f9e157f81 100644 --- a/internal/pgbackrest/certificates_test.go +++ b/internal/pgbackrest/certificates_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/config.go b/internal/pgbackrest/config.go index f50b2690ee..e10a853ebd 100644 --- a/internal/pgbackrest/config.go +++ b/internal/pgbackrest/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -34,6 +34,10 @@ const ( // repository host CMRepoKey = "pgbackrest_repo.conf" + // CMCloudRepoKey is the name of the pgBackRest configuration file used by backup jobs + // for cloud repos + CMCloudRepoKey = "pgbackrest_cloud.conf" + // configDirectory is the pgBackRest configuration directory. 
configDirectory = "/etc/pgbackrest/conf.d" @@ -65,8 +69,9 @@ const ( // pgbackrest_job.conf is used by certain jobs, such as stanza create and backup // pgbackrest_primary.conf is used by the primary database pod // pgbackrest_repo.conf is used by the pgBackRest repository pod +// pgbackrest_cloud.conf is used by cloud repo backup jobs func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, - repoHostName, configHash, serviceName, serviceNamespace string, + repoHostName, configHash, serviceName, serviceNamespace, cloudLogPath string, instanceNames []string) *corev1.ConfigMap { meta := naming.PGBackRestConfig(postgresCluster) @@ -102,17 +107,14 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, postgresCluster.Spec.Backups.PGBackRest.Global, ).String() + // As the cluster transitions from having a repository host to having none, // PostgreSQL instances that have not rolled out expect to mount a server // config file. Always populate that file so those volumes stay valid and - // Kubernetes propagates their contents to those pods. The repo host name - // given below should always be set, but this guards for cases when it might - // not be. - cm.Data[serverConfigMapKey] = "" - - if repoHostName != "" { - cm.Data[serverConfigMapKey] = iniGeneratedWarning + - serverConfig(postgresCluster).String() + // Kubernetes propagates their contents to those pods. + cm.Data[serverConfigMapKey] = iniGeneratedWarning + + serverConfig(postgresCluster).String() + if RepoHostVolumeDefined(postgresCluster) && repoHostName != "" { cm.Data[CMRepoKey] = iniGeneratedWarning + populateRepoHostConfigurationMap( serviceName, serviceNamespace, @@ -124,6 +126,18 @@ func CreatePGBackRestConfigMapIntent(postgresCluster *v1beta1.PostgresCluster, ).String() } + if CloudRepoDefined(postgresCluster) { + cm.Data[CMCloudRepoKey] = iniGeneratedWarning + + populateCloudRepoConfigurationMap( + serviceName, serviceNamespace, pgdataDir, + config.FetchKeyCommand(&postgresCluster.Spec), + strconv.Itoa(postgresCluster.Spec.PostgresVersion), + cloudLogPath, pgPort, instanceNames, + postgresCluster.Spec.Backups.PGBackRest.Repos, + postgresCluster.Spec.Backups.PGBackRest.Global, + ).String() + } + cm.Data[ConfigHashKey] = configHash return cm @@ -174,7 +188,7 @@ func MakePGBackrestLogDir(template *corev1.PodTemplateSpec, func RestoreCommand(pgdata, hugePagesSetting, fetchKeyCommand string, tablespaceVolumes []*corev1.PersistentVolumeClaim, args ...string) []string { // After pgBackRest restores files, PostgreSQL starts in recovery to finish - // replaying WAL files. "hot_standby" is "on" (by default) so we can detect + // replaying WAL files. "hot_standby" is "on" so we can detect // when recovery has finished. In that mode, some parameters cannot be // smaller than they were when PostgreSQL was backed up. Configure them to // match the values reported by "pg_controldata". Those parameters are also @@ -233,6 +247,7 @@ cat > /tmp/postgres.restore.conf <> /tmp/postgres.restore.conf "max_wal_senders = '${max_wals}'" fi -pg_ctl start --silent --timeout=31536000 --wait --options='--config-file=/tmp/postgres.restore.conf' +read -r stopped <<< "${control##*recovery ending location:}" +pg_ctl start --silent --timeout=31536000 --wait --options='--config-file=/tmp/postgres.restore.conf' || failed=$? 
+[[ "${started-}" == "${stopped}" && -n "${failed-}" ]] && exit "${failed}" +started="${stopped}" && [[ -n "${failed-}" ]] && failed= && continue fi recovery=$(psql -Atc "SELECT CASE @@ -443,6 +461,69 @@ func populateRepoHostConfigurationMap( } } +func populateCloudRepoConfigurationMap( + serviceName, serviceNamespace, pgdataDir, + fetchKeyCommand, postgresVersion, logPath string, + pgPort int32, pgHosts []string, repos []v1beta1.PGBackRestRepo, + globalConfig map[string]string, +) iniSectionSet { + + global := iniMultiSet{} + stanza := iniMultiSet{} + + for _, repo := range repos { + if repo.Volume != nil { + continue + } + + global.Set(repo.Name+"-path", defaultRepo1Path+repo.Name) + + for option, val := range getExternalRepoConfigs(repo) { + global.Set(option, val) + } + } + + // If we are given a log path, set it in the config. Otherwise, turn off logging to file. + if logPath != "" { + global.Set("log-path", logPath) + } else { + global.Set("log-level-file", "off") + } + + for option, val := range globalConfig { + global.Set(option, val) + } + + // set the configs for all PG hosts + for i, pgHost := range pgHosts { + // TODO(cbandy): pass a FQDN in already. + pgHostFQDN := pgHost + "-0." + + serviceName + "." + serviceNamespace + ".svc." + + naming.KubernetesClusterDomain(context.Background()) + + stanza.Set(fmt.Sprintf("pg%d-host", i+1), pgHostFQDN) + stanza.Set(fmt.Sprintf("pg%d-host-type", i+1), "tls") + stanza.Set(fmt.Sprintf("pg%d-host-ca-file", i+1), certAuthorityAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-cert-file", i+1), certClientAbsolutePath) + stanza.Set(fmt.Sprintf("pg%d-host-key-file", i+1), certClientPrivateKeyAbsolutePath) + + stanza.Set(fmt.Sprintf("pg%d-path", i+1), pgdataDir) + stanza.Set(fmt.Sprintf("pg%d-port", i+1), fmt.Sprint(pgPort)) + stanza.Set(fmt.Sprintf("pg%d-socket-path", i+1), postgres.SocketDirectory) + + if fetchKeyCommand != "" { + stanza.Set("archive-header-check", "n") + stanza.Set("page-header-check", "n") + stanza.Set("pg-version-force", postgresVersion) + } + } + + return iniSectionSet{ + "global": global, + DefaultStanzaName: stanza, + } +} + // getExternalRepoConfigs returns a map containing the configuration settings for an external // pgBackRest repository as defined in the PostgresCluster spec func getExternalRepoConfigs(repo v1beta1.PGBackRestRepo) map[string]string { diff --git a/internal/pgbackrest/config.md b/internal/pgbackrest/config.md index 2101535b3a..f19c9ac1e4 100644 --- a/internal/pgbackrest/config.md +++ b/internal/pgbackrest/config.md @@ -1,22 +1,22 @@ # pgBackRest Configuration Overview -The initial pgBackRest configuration for the Postgres Clusters is designed to stand up a +The initial pgBackRest configuration for the Postgres Clusters is designed to stand up a minimal configuration for use by the various pgBackRest functions needed by the Postgres cluster. These settings are meant to be the minimally required settings, with other settings supported through the use of custom configurations. -During initial cluster creation, four pgBackRest use cases are involved. +During initial cluster creation, four pgBackRest use cases are involved. -These settings are configured in either the [global] or [stanza] sections of the +These settings are configured in either the [global] or [stanza] sections of the pgBackRest configuration based on their designation in the pgBackRest code. 
For more information on the above, and other settings, please see -https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c + As shown, the settings with the `cfgSectionGlobal` designation are @@ -24,18 +24,17 @@ As shown, the settings with the `cfgSectionGlobal` designation are `log-level-file`: Level for file logging. Set to 'off' when the repo host has no volume. -`repo-path`: Path where backups and archive are stored. +`repo-path`: Path where backups and archive are stored. The repository is where pgBackRest stores backups and archives WAL segments. `repo-host`: Repository host when operating remotely via TLS. - The settings with the `cfgSectionStanza` designation are `pg-host`: PostgreSQL host for operating remotely via TLS. `pg-path`: The path of the PostgreSQL data directory. - This should be the same as the data_directory setting in postgresql.conf. + This should be the same as the data_directory setting in postgresql.conf. `pg-port`: The port that PostgreSQL is running on. @@ -44,14 +43,13 @@ The settings with the `cfgSectionStanza` designation are For more information on these and other configuration settings, please see `https://pgbackrest.org/configuration.html`. -# Configuration Per Function +## Configuration Per Function -Below, each of the four configuration sets is outlined by use case. Please note that certain -settings have acceptable defaults for the cluster's usage (such as for `repo1-type` which +Below, each of the four configuration sets is outlined by use case. Please note that certain +settings have acceptable defaults for the cluster's usage (such as for `repo1-type` which defaults to `posix`), so those settings are not included. - -1. Primary Database Pod +1. Primary Database Pod [global] log-path @@ -86,28 +84,26 @@ log-path [global] log-path - -# Initial pgBackRest Configuration +## Initial pgBackRest Configuration In order to be used by the Postgres cluster, these default configurations are stored in -a configmap. This configmap is named with the following convention `-pgbackrest-config`, +a configmap. This configmap is named with the following convention `-pgbackrest-config`, such that a cluster named 'mycluster' would have a configuration configmap named `mycluster-pgbackrest-config`. -As noted above, there are three distinct default configurations, each of which is referenced +As noted above, there are three distinct default configurations, each of which is referenced by a key value in the configmap's data section. For the primary database pod, the key is `pgbackrest_primary.conf`. For the pgBackRest repo pod, the key is `pgbackrest_repo.conf`. Finally, for the pgBackRest stanza job pod and the initial pgBackRest backup job pod, the key is `pgbackrest_job.conf`. - -For each pod, the relevant configuration file is mounted as a projected volume named + +For each pod, the relevant configuration file is mounted as a projected volume named `pgbackrest-config-vol`. The configuration file will be found in the `/etc/pgbackrest` directory -of the relevant container and is named `pgbackrest.conf`, matching the default pgBackRest location. -For more information, please see +of the relevant container and is named `pgbackrest.conf`, matching the default pgBackRest location. 
+For more information, please see `https://pgbackrest.org/configuration.html#introduction` - -# Custom Configuration Support +## Custom Configuration Support TODO(tjmoore4): Document custom configuration solution once implemented @@ -116,7 +112,7 @@ flag with the desired pgBackRest command. This should point to the directory pat where the `*.conf` file with the custom configuration is located. This file will be added as a projected volume and must be formatted in the standard -pgBackRest INI convention. Please note that any of the configuration settings listed +pgBackRest INI convention. Please note that any of the configuration settings listed above MUST BE CONFIGURED VIA THE POSTGRESCLUSTER SPEC so as to avoid errors. For more information, please see @@ -140,7 +136,7 @@ command-line or top-to-bottom in INI files. The remaining options must be set exactly once. `pgbackrest` exits non-zero when the option occurs twice on the command-line or twice in a file: -``` +```text ERROR: [031]: option 'io-timeout' cannot be set multiple times ``` diff --git a/internal/pgbackrest/config_test.go b/internal/pgbackrest/config_test.go index b74bf9a4a8..8dec601144 100644 --- a/internal/pgbackrest/config_test.go +++ b/internal/pgbackrest/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -34,20 +34,268 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { domain := naming.KubernetesClusterDomain(context.Background()) - t.Run("NoVolumeRepo", func(t *testing.T) { + t.Run("NoRepos", func(t *testing.T) { + // We always create the config for the pgbackrest instance and server cluster := cluster.DeepCopy() cluster.Spec.Backups.PGBackRest.Repos = nil + cluster.UID = "piano" configmap := CreatePGBackRestConfigMapIntent(cluster, - "", "number", "pod-service-name", "test-ns", + "", "number", "pod-service-name", "test-ns", "", []string{"some-instance"}) assert.Equal(t, configmap.Data["config-hash"], "number") - assert.Equal(t, configmap.Data["pgbackrest-server.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@piano=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") }) - t.Run("DedicatedRepoHost", func(t *testing.T) { + t.Run("CloudRepoPresentNoVolumeRepo", func(t *testing.T) { cluster := cluster.DeepCopy() + cluster.UID = "ukulele" + cluster.Spec.Backups.PGBackRest.Global = map[string]string{ + "repo1-test": "something", + } + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + GCS: &v1beta1.RepoGCS{Bucket: "g-bucket"}, + }, + } + + configmap := CreatePGBackRestConfigMapIntent(cluster, + "", "anumber", "pod-service-name", "test-ns", "", + []string{"some-instance"}) + + configmapWithCloudLogging := CreatePGBackRestConfigMapIntent(cluster, + "", "anumber", "pod-service-name", "test-ns", "/a/log/path", + []string{"some-instance"}) + + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmapWithCloudLogging.Annotations, map[string]string{}) + + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + assert.DeepEqual(t, configmapWithCloudLogging.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmapWithCloudLogging.Data["config-hash"], "anumber") + + serverConfigExpectation := strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@ukulele=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], serverConfigExpectation+"\n") + assert.Equal(t, configmapWithCloudLogging.Data["pgbackrest-server.conf"], serverConfigExpectation+"\n") + + instanceConfigExpectation := strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-gcs-bucket = g-bucket +repo1-path = /pgbackrest/repo1 +repo1-test = something +repo1-type = gcs +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n") + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], instanceConfigExpectation+"\n") + assert.Equal(t, configmapWithCloudLogging.Data["pgbackrest_instance.conf"], instanceConfigExpectation+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +log-level-file = off +repo1-gcs-bucket = g-bucket +repo1-path = /pgbackrest/repo1 +repo1-test = something +repo1-type = gcs + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmapWithCloudLogging.Data["pgbackrest_cloud.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /a/log/path +repo1-gcs-bucket = g-bucket +repo1-path = /pgbackrest/repo1 +repo1-test = something +repo1-type = gcs + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], "") + assert.Equal(t, configmapWithCloudLogging.Data["pgbackrest_repo.conf"], "") + }) + + t.Run("VolumeRepoPresentNoCloudRepo", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "guitar" + cluster.Spec.Backups.PGBackRest.Repos = []v1beta1.PGBackRestRepo{ + { + Name: "repo1", + Volume: &v1beta1.RepoPVC{}, + }, + } + + configmap := CreatePGBackRestConfigMapIntent(cluster, + "repo-hostname", "anumber", "pod-service-name", "test-ns", "", + []string{"some-instance"}) + + assert.DeepEqual(t, configmap.Annotations, map[string]string{}) + assert.DeepEqual(t, configmap.Labels, map[string]string{ + "postgres-operator.crunchydata.com/cluster": "hippo-dance", + "postgres-operator.crunchydata.com/pgbackrest": "", + "postgres-operator.crunchydata.com/pgbackrest-config": "", + }) + + assert.Equal(t, configmap.Data["config-hash"], "anumber") + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@guitar=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_instance.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +archive-async = y +log-path = /pgdata/pgbackrest/log +repo1-host = repo-hostname-0.pod-service-name.test-ns.svc.`+domain+` +repo1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +repo1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +repo1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +repo1-host-type = tls +repo1-host-user = postgres +repo1-path = /pgbackrest/repo1 +spool-path = /pgdata/pgbackrest-spool + +[db] +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +log-path = /pgbackrest/repo1/log +repo1-path = /pgbackrest/repo1 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], "") + }) + + t.Run("DedicatedRepoHostAndCloudRepos", func(t *testing.T) { + cluster := cluster.DeepCopy() + cluster.UID = "bass" cluster.Spec.Backups.PGBackRest.Global = map[string]string{ "repo3-test": "something", } @@ -73,7 +321,7 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { } configmap := CreatePGBackRestConfigMapIntent(cluster, - "repo-hostname", "abcde12345", "pod-service-name", "test-ns", + "repo-hostname", "abcde12345", "pod-service-name", "test-ns", "", []string{"some-instance"}) assert.DeepEqual(t, configmap.Annotations, map[string]string{}) @@ -84,6 +332,25 @@ func TestCreatePGBackRestConfigMapIntent(t *testing.T) { }) assert.Equal(t, configmap.Data["config-hash"], "abcde12345") + + assert.Equal(t, configmap.Data["pgbackrest-server.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. + +[global] +tls-server-address = 0.0.0.0 +tls-server-auth = pgbackrest@bass=* +tls-server-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +tls-server-cert-file = /etc/pgbackrest/server/server-tls.crt +tls-server-key-file = /etc/pgbackrest/server/server-tls.key + +[global:server] +log-level-console = detail +log-level-file = off +log-level-stderr = error +log-timestamp = n + `, "\t\n")+"\n") + assert.Equal(t, configmap.Data["pgbackrest_repo.conf"], strings.Trim(` # Generated by postgres-operator. DO NOT EDIT. # Your changes will not be saved. @@ -146,6 +413,36 @@ spool-path = /pgdata/pgbackrest-spool [db] pg1-path = /pgdata/pg12 pg1-port = 2345 +pg1-socket-path = /tmp/postgres + `, "\t\n")+"\n") + + assert.Equal(t, configmap.Data["pgbackrest_cloud.conf"], strings.Trim(` +# Generated by postgres-operator. DO NOT EDIT. +# Your changes will not be saved. 
+ +[global] +log-level-file = off +repo2-azure-container = a-container +repo2-path = /pgbackrest/repo2 +repo2-type = azure +repo3-gcs-bucket = g-bucket +repo3-path = /pgbackrest/repo3 +repo3-test = something +repo3-type = gcs +repo4-path = /pgbackrest/repo4 +repo4-s3-bucket = s-bucket +repo4-s3-endpoint = endpoint-s +repo4-s3-region = earth +repo4-type = s3 + +[db] +pg1-host = some-instance-0.pod-service-name.test-ns.svc.`+domain+` +pg1-host-ca-file = /etc/pgbackrest/conf.d/~postgres-operator/tls-ca.crt +pg1-host-cert-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.crt +pg1-host-key-file = /etc/pgbackrest/conf.d/~postgres-operator/client-tls.key +pg1-host-type = tls +pg1-path = /pgdata/pg12 +pg1-port = 2345 pg1-socket-path = /tmp/postgres `, "\t\n")+"\n") }) @@ -178,7 +475,7 @@ pg1-socket-path = /tmp/postgres } configmap := CreatePGBackRestConfigMapIntent(cluster, - "any", "any", "any", "any", nil) + "any", "any", "any", "any", "any", nil) assert.DeepEqual(t, configmap.Annotations, map[string]string{ "ak1": "cluster-av1", @@ -209,7 +506,7 @@ pg1-socket-path = /tmp/postgres } configmap := CreatePGBackRestConfigMapIntent(cluster, - "", "number", "pod-service-name", "test-ns", + "", "number", "pod-service-name", "test-ns", "", []string{"some-instance"}) assert.Assert(t, @@ -230,7 +527,7 @@ pg1-socket-path = /tmp/postgres } configmap = CreatePGBackRestConfigMapIntent(cluster, - "repo1", "number", "pod-service-name", "test-ns", + "repo1", "number", "pod-service-name", "test-ns", "", []string{"some-instance"}) assert.Assert(t, @@ -317,7 +614,7 @@ func TestReloadCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -345,7 +642,7 @@ func TestRestoreCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } @@ -382,7 +679,7 @@ func TestDedicatedSnapshotVolumeRestoreCommand(t *testing.T) { file := filepath.Join(dir, "script.bash") assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbackrest/iana.go b/internal/pgbackrest/iana.go index c6e2f71e6c..70d332cb73 100644 --- a/internal/pgbackrest/iana.go +++ b/internal/pgbackrest/iana.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/options.go b/internal/pgbackrest/options.go index 2439901e47..a768f7c37d 100644 --- a/internal/pgbackrest/options.go +++ b/internal/pgbackrest/options.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/options_test.go b/internal/pgbackrest/options_test.go index 374737ec7f..3652dd94bd 100644 --- a/internal/pgbackrest/options_test.go +++ b/internal/pgbackrest/options_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/pgbackrest.go b/internal/pgbackrest/pgbackrest.go index 21124b9744..e14fbf5fbc 100644 --- a/internal/pgbackrest/pgbackrest.go +++ b/internal/pgbackrest/pgbackrest.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/pgbackrest_test.go b/internal/pgbackrest/pgbackrest_test.go index 33c97913cf..5bb1d571bb 100644 --- a/internal/pgbackrest/pgbackrest_test.go +++ b/internal/pgbackrest/pgbackrest_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -13,12 +13,10 @@ import ( "testing" "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/api/resource" - corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -94,7 +92,7 @@ fi assert.NilError(t, os.WriteFile(file, []byte(shellCheckScript), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(ctx, shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbackrest/postgres.go b/internal/pgbackrest/postgres.go index ab5c71868c..0d05041c75 100644 --- a/internal/pgbackrest/postgres.go +++ b/internal/pgbackrest/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/postgres_test.go b/internal/pgbackrest/postgres_test.go index b87b35631a..4ec215cec6 100644 --- a/internal/pgbackrest/postgres_test.go +++ b/internal/pgbackrest/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/rbac.go b/internal/pgbackrest/rbac.go index 56e8d27986..72699dc53f 100644 --- a/internal/pgbackrest/rbac.go +++ b/internal/pgbackrest/rbac.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/rbac_test.go b/internal/pgbackrest/rbac_test.go index a620276f64..1db638e43e 100644 --- a/internal/pgbackrest/rbac_test.go +++ b/internal/pgbackrest/rbac_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index d22bccc3c0..5c1db92024 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -103,17 +103,17 @@ func AddConfigToInstancePod( configmap.ConfigMap.Items = []corev1.KeyToPath{ {Key: CMInstanceKey, Path: CMInstanceKey}, {Key: ConfigHashKey, Path: ConfigHashKey}, + {Key: serverConfigMapKey, Path: serverConfigProjectionPath}, } + // As the cluster transitions from having a repository host to having none, + // PostgreSQL instances that have not rolled out expect to mount client + // certificates. Specify those files are optional so the configuration + // volumes stay valid and Kubernetes propagates their contents to those pods. secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} secret.Secret.Name = naming.PGBackRestSecret(cluster).Name - - configmap.ConfigMap.Items = append( - configmap.ConfigMap.Items, corev1.KeyToPath{ - Key: serverConfigMapKey, - Path: serverConfigProjectionPath, - }) secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + secret.Secret.Optional = initialize.Bool(true) // Start with a copy of projections specified in the cluster. Items later in // the list take precedence over earlier items (that is, last write wins). @@ -130,7 +130,7 @@ func AddConfigToInstancePod( addConfigVolumeAndMounts(pod, sources) } -// AddConfigToRepoPod adds and mounts the pgBackRest configuration volume for +// AddConfigToRepoPod adds and mounts the pgBackRest configuration volumes for // the dedicated repository host of cluster to pod. The pgBackRest containers // must already be in pod. func AddConfigToRepoPod( @@ -157,6 +157,33 @@ func AddConfigToRepoPod( addConfigVolumeAndMounts(pod, append(sources, configmap, secret)) } +// AddConfigToCloudBackupJob adds and mounts the pgBackRest configuration volumes +// to the backup job for creating a backup to a cloud repo. +func AddConfigToCloudBackupJob( + cluster *v1beta1.PostgresCluster, podTemplateSpec *corev1.PodTemplateSpec, +) { + configmap := corev1.VolumeProjection{ConfigMap: &corev1.ConfigMapProjection{}} + configmap.ConfigMap.Name = naming.PGBackRestConfig(cluster).Name + configmap.ConfigMap.Items = []corev1.KeyToPath{ + {Key: CMCloudRepoKey, Path: CMCloudRepoKey}, + } + + secret := corev1.VolumeProjection{Secret: &corev1.SecretProjection{}} + secret.Secret.Name = naming.PGBackRestSecret(cluster).Name + secret.Secret.Items = append(secret.Secret.Items, clientCertificates()...) + + // Start with a copy of projections specified in the cluster. Items later in + // the list take precedence over earlier items (that is, last write wins). + // - https://kubernetes.io/docs/concepts/storage/volumes/#projected + sources := append([]corev1.VolumeProjection{}, + cluster.Spec.Backups.PGBackRest.Configuration...) + + addConfigVolumeAndMounts(&podTemplateSpec.Spec, append(sources, configmap, secret)) + + // Add tmp directory for pgbackrest lock files + AddTMPEmptyDir(podTemplateSpec) +} + // AddConfigToRestorePod adds and mounts the pgBackRest configuration volume // for the restore job of cluster to pod. The pgBackRest containers must // already be in pod. 
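The ordering in `AddConfigToCloudBackupJob` (and its siblings above) is doing real work: projected volumes merge their sources top to bottom, and later items win when paths collide. The sketch below shows that composition on its own, assuming the `hippo` object names used in this package's tests; it is an illustration of the ordering rule, not the operator's function.

```go
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A user-supplied projection from spec.backups.pgbackrest.configuration;
	// the name is illustrative.
	custom := corev1.VolumeProjection{
		ConfigMap: &corev1.ConfigMapProjection{
			LocalObjectReference: corev1.LocalObjectReference{Name: "custom-configmap"},
		},
	}

	// The operator's own sources: only the cloud configuration key from the
	// cluster ConfigMap, plus the client TLS certificates from the Secret.
	operator := []corev1.VolumeProjection{
		{ConfigMap: &corev1.ConfigMapProjection{
			LocalObjectReference: corev1.LocalObjectReference{Name: "hippo-pgbackrest-config"},
			Items: []corev1.KeyToPath{
				{Key: "pgbackrest_cloud.conf", Path: "pgbackrest_cloud.conf"},
			},
		}},
		{Secret: &corev1.SecretProjection{
			LocalObjectReference: corev1.LocalObjectReference{Name: "hippo-pgbackrest"},
		}},
	}

	// Later items take precedence, so appending the operator's projections
	// after the user's means the operator's files win any path conflict.
	sources := append([]corev1.VolumeProjection{custom}, operator...)

	for i, s := range sources {
		switch {
		case s.ConfigMap != nil:
			fmt.Printf("source %d: configMap %q\n", i, s.ConfigMap.Name)
		case s.Secret != nil:
			fmt.Printf("source %d: secret %q\n", i, s.Secret.Name)
		}
	}
}
```

Because the operator's ConfigMap and Secret are appended last, a user projection that shipped its own `pgbackrest_cloud.conf` would be shadowed rather than the other way around.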
@@ -508,38 +535,36 @@ func Secret(ctx context.Context, var err error // Save the CA and generate a TLS client certificate for the entire cluster. - if inRepoHost != nil { - initialize.Map(&outSecret.Data) - - // The server verifies its "tls-server-auth" option contains the common - // name (CN) of the certificate presented by a client. The entire - // cluster uses a single client certificate so the "tls-server-auth" - // option can stay the same when PostgreSQL instances and repository - // hosts are added or removed. - leaf := &pki.LeafCertificate{} - commonName := clientCommonName(inCluster) - dnsNames := []string{commonName} + initialize.Map(&outSecret.Data) - if err == nil { - // Unmarshal and validate the stored leaf. These first errors can - // be ignored because they result in an invalid leaf which is then - // correctly regenerated. - _ = leaf.Certificate.UnmarshalText(inSecret.Data[certClientSecretKey]) - _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certClientPrivateKeySecretKey]) + // The server verifies its "tls-server-auth" option contains the common + // name (CN) of the certificate presented by a client. The entire + // cluster uses a single client certificate so the "tls-server-auth" + // option can stay the same when PostgreSQL instances and repository + // hosts are added or removed. + leaf := &pki.LeafCertificate{} + commonName := clientCommonName(inCluster) + dnsNames := []string{commonName} - leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) - err = errors.WithStack(err) - } + if err == nil { + // Unmarshal and validate the stored leaf. These first errors can + // be ignored because they result in an invalid leaf which is then + // correctly regenerated. + _ = leaf.Certificate.UnmarshalText(inSecret.Data[certClientSecretKey]) + _ = leaf.PrivateKey.UnmarshalText(inSecret.Data[certClientPrivateKeySecretKey]) + + leaf, err = inRoot.RegenerateLeafWhenNecessary(leaf, commonName, dnsNames) + err = errors.WithStack(err) + } - if err == nil { - outSecret.Data[certAuthoritySecretKey], err = certFile(inRoot.Certificate) - } - if err == nil { - outSecret.Data[certClientPrivateKeySecretKey], err = certFile(leaf.PrivateKey) - } - if err == nil { - outSecret.Data[certClientSecretKey], err = certFile(leaf.Certificate) - } + if err == nil { + outSecret.Data[certAuthoritySecretKey], err = certFile(inRoot.Certificate) + } + if err == nil { + outSecret.Data[certClientPrivateKeySecretKey], err = certFile(leaf.PrivateKey) + } + if err == nil { + outSecret.Data[certClientSecretKey], err = certFile(leaf.Certificate) } // Generate a TLS server certificate for each repository host. diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index 4957d58f7b..fcdb131cce 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -128,7 +128,7 @@ func TestAddRepoVolumesToPod(t *testing.T) { for _, r := range tc.repos { var foundVolume bool for _, v := range template.Spec.Volumes { - if v.Name == r.Name && v.VolumeSource.PersistentVolumeClaim.ClaimName == + if v.Name == r.Name && v.PersistentVolumeClaim.ClaimName == naming.PGBackRestRepoVolume(postgresCluster, r.Name).Name { foundVolume = true break @@ -244,6 +244,7 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) }) @@ -279,6 +280,7 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) }) @@ -319,6 +321,7 @@ func TestAddConfigToInstancePod(t *testing.T) { mode: 384 path: ~postgres-operator/client-tls.key name: hippo-pgbackrest + optional: true `)) }) } @@ -396,6 +399,84 @@ func TestAddConfigToRepoPod(t *testing.T) { }) } +func TestAddConfigToCloudBackupJob(t *testing.T) { + cluster := v1beta1.PostgresCluster{} + cluster.Name = "hippo" + cluster.Default() + + podTemplate := corev1.PodTemplateSpec{ + Spec: corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "other"}, + {Name: "pgbackrest"}, + }, + }, + } + + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers and Volumes fields have changed. + assert.DeepEqual(t, podTemplate.Spec, *result, cmpopts.IgnoreFields(podTemplate.Spec, "Containers", "Volumes")) + + // Only pgBackRest container has config mount, but tmp dir is mounted to all containers + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` +- name: other + resources: {} + volumeMounts: + - mountPath: /tmp + name: tmp +- name: pgbackrest + resources: {} + volumeMounts: + - mountPath: /etc/pgbackrest/conf.d + name: pgbackrest-config + readOnly: true + - mountPath: /tmp + name: tmp + `)) + } + + t.Run("CustomProjections", func(t *testing.T) { + custom := corev1.ConfigMapProjection{} + custom.Name = "custom-configmap" + + cluster := cluster.DeepCopy() + cluster.Spec.Backups.PGBackRest.Configuration = []corev1.VolumeProjection{ + {ConfigMap: &custom}, + } + + out := podTemplate.DeepCopy() + AddConfigToCloudBackupJob(cluster, out) + alwaysExpect(t, &out.Spec) + + // Cloud backup configuration files and client certificates + // after custom projections. + assert.Assert(t, cmp.MarshalMatches(out.Spec.Volumes, ` +- name: pgbackrest-config + projected: + sources: + - configMap: + name: custom-configmap + - configMap: + items: + - key: pgbackrest_cloud.conf + path: pgbackrest_cloud.conf + name: hippo-pgbackrest-config + - secret: + items: + - key: pgbackrest.ca-roots + path: ~postgres-operator/tls-ca.crt + - key: pgbackrest-client.crt + path: ~postgres-operator/client-tls.crt + - key: pgbackrest-client.key + mode: 384 + path: ~postgres-operator/client-tls.key + name: hippo-pgbackrest +- emptyDir: + sizeLimit: 16Mi + name: tmp`)) + }) +} + func TestAddConfigToRestorePod(t *testing.T) { cluster := v1beta1.PostgresCluster{} cluster.Name = "source" @@ -1025,10 +1106,13 @@ func TestSecret(t *testing.T) { assert.NilError(t, err) t.Run("NoRepoHost", func(t *testing.T) { - // Nothing happens when there is no repository host. 
-		constant := intent.DeepCopy()
+		// We always add the pgbackrest server certs
		assert.NilError(t, Secret(ctx, cluster, nil, root, existing, intent))
-		assert.DeepEqual(t, constant, intent)
+		assert.Assert(t, len(intent.Data["pgbackrest-client.crt"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-client.key"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest.ca-roots"]) > 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-repo-host.crt"]) == 0)
+		assert.Assert(t, len(intent.Data["pgbackrest-repo-host.key"]) == 0)
	})

	host := new(appsv1.StatefulSet)
diff --git a/internal/pgbackrest/restore.md b/internal/pgbackrest/restore.md
index 8828576921..95257990d6 100644
--- a/internal/pgbackrest/restore.md
+++ b/internal/pgbackrest/restore.md
@@ -1,5 +1,5 @@
diff --git a/internal/pgbackrest/tls-server.md b/internal/pgbackrest/tls-server.md
index b572cc1ea4..56af386d5b 100644
--- a/internal/pgbackrest/tls-server.md
+++ b/internal/pgbackrest/tls-server.md
@@ -1,5 +1,5 @@
@@ -12,10 +12,8 @@ on different pods:
- [dedicated repository host](https://pgbackrest.org/user-guide.html#repo-host)
- [backup from standby](https://pgbackrest.org/user-guide.html#standby-backup)

-When a PostgresCluster is configured to store backups on a PVC, the dedicated
-repository host is used to make that PVC available to all PostgreSQL instances
-in the cluster. Regardless of whether the repo host has a defined PVC, it
-functions as the server for the pgBackRest clients that run on the Instances.
+When a PostgresCluster is configured to store backups on a PVC, we start a dedicated
+repository host to make that PVC available to all PostgreSQL instances in the cluster.

The repository host runs a `pgbackrest` server that is secured through TLS and
[certificates][]. When performing backups, it connects to `pgbackrest` servers
@@ -26,32 +24,30 @@ to the repository host to [send and receive WAL files][archiving].

[archiving]: https://www.postgresql.org/docs/current/continuous-archiving.html
[certificates]: certificates.md

-
The `pgbackrest` command acts as a TLS client and connects to a pgBackRest TLS
server when `pg-host-type=tls` and/or `repo-host-type=tls`. The default for
these is `ssh`:
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L3771
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L6137
-
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L3771>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L6137>

The pgBackRest TLS server is configured through the `tls-server-*` [options](config.md).
In pgBackRest 2.38, changing any of these options or changing certificate contents requires
a reload of the server, as shown in the "Setup TLS Server" section of the documentation,
with the command configured as

-```
+```text
ExecReload=kill -HUP $MAINPID
```

-- https://pgbackrest.org/user-guide-rhel.html#repo-host/setup-tls
+- <https://pgbackrest.org/user-guide-rhel.html#repo-host/setup-tls>

- `tls-server-address`, `tls-server-port`
  The network address and port on which to listen. pgBackRest 2.38 listens on
  the *first* address returned by `getaddrinfo()`. There is no way to listen
  on all interfaces.
-  - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/server.c#L172
-  - https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/common.c#L87
+  - <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/server.c#L172>
+  - <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/io/socket/common.c#L87>

- `tls-server-cert-file`, `tls-server-key-file`
  The [certificate chain][certificates] and private key pair used to encrypt connections.
@@ -65,12 +61,11 @@ ExecReload=kill -HUP $MAINPID
  to interact with.
  [Required](https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/config/parse.auto.c#L8751).

-
In pgBackRest 2.38, as mentioned above, sending SIGHUP causes a configuration reload.

-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L178
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L178>

-```
+```text
P00 DETAIL: configuration reload begin
P00 INFO: server command begin 2.38...
P00 DETAIL: configuration reload end
@@ -78,20 +73,18 @@ P00 DETAIL: configuration reload end

Sending SIGINT to the TLS server causes it to exit with code 63, TermError.

-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L73-L75
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L62
-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/error.auto.c#L48
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L73-L75>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/exit.c#L62>
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/common/error.auto.c#L48>

-
-```
+```text
P00 INFO: server command end: terminated on signal [SIGINT]
```

Sending SIGTERM exits the signal loop and leads to the command termination.

-- https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L194
+- <https://github.com/pgbackrest/pgbackrest/blob/release/2.38/src/command/server/server.c#L194>

-
-```
+```text
P00 INFO: server command end: completed successfully
```

diff --git a/internal/pgbackrest/util.go b/internal/pgbackrest/util.go
index 4fc2266c56..cd5fd11261 100644
--- a/internal/pgbackrest/util.go
+++ b/internal/pgbackrest/util.go
@@ -1,4 +1,4 @@
-// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc.
@@ -100,3 +116,39 @@ func safeHash32(content func(w io.Writer) error) (string, error) { } return rand.SafeEncodeString(fmt.Sprint(hash.Sum32())), nil } + +// AddTMPEmptyDir adds a "tmp" EmptyDir volume to the provided Pod template, while then also adding a +// volume mount at /tmp for all containers defined within the Pod template +// The '/tmp' directory is currently utilized for the following: +// - As the pgBackRest lock directory (this is the default lock location for pgBackRest) +// - The location where the replication client certificates can be loaded with the proper +// permissions set +// +// This function was copied from the postgrescluster package. +func AddTMPEmptyDir(template *corev1.PodTemplateSpec) { + + template.Spec.Volumes = append(template.Spec.Volumes, corev1.Volume{ + Name: "tmp", + VolumeSource: corev1.VolumeSource{ + EmptyDir: &corev1.EmptyDirVolumeSource{ + SizeLimit: &tmpDirSizeLimit, + }, + }, + }) + + for i := range template.Spec.Containers { + template.Spec.Containers[i].VolumeMounts = append(template.Spec.Containers[i].VolumeMounts, + corev1.VolumeMount{ + Name: "tmp", + MountPath: "/tmp", + }) + } + + for i := range template.Spec.InitContainers { + template.Spec.InitContainers[i].VolumeMounts = append(template.Spec.InitContainers[i].VolumeMounts, + corev1.VolumeMount{ + Name: "tmp", + MountPath: "/tmp", + }) + } +} diff --git a/internal/pgbackrest/util_test.go b/internal/pgbackrest/util_test.go index eb0f4dec29..d2fd93455c 100644 --- a/internal/pgbackrest/util_test.go +++ b/internal/pgbackrest/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -6,7 +6,7 @@ package pgbackrest import ( "io" - "math/rand" + "math/rand/v2" "strconv" "testing" @@ -80,7 +80,7 @@ func TestCalculateConfigHashes(t *testing.T) { assert.Equal(t, preCalculatedRepo3S3Hash, configHashMap["repo3"]) // call CalculateConfigHashes multiple times to ensure consistent results - for i := 0; i < 10; i++ { + for range 10 { hashMap, hash, err := CalculateConfigHashes(postgresCluster) assert.NilError(t, err) assert.Equal(t, configHash, hash) @@ -92,7 +92,7 @@ func TestCalculateConfigHashes(t *testing.T) { // shuffle the repo slice in order to ensure the same result is returned regardless of the // order of the repos slice shuffleCluster := postgresCluster.DeepCopy() - for i := 0; i < 10; i++ { + for range 10 { repos := shuffleCluster.Spec.Backups.PGBackRest.Repos rand.Shuffle(len(repos), func(i, j int) { repos[i], repos[j] = repos[j], repos[i] @@ -103,7 +103,7 @@ func TestCalculateConfigHashes(t *testing.T) { } // now modify some values in each repo and confirm we see a different result - for i := 0; i < 3; i++ { + for i := range 3 { modCluster := postgresCluster.DeepCopy() switch i { case 0: diff --git a/internal/pgbouncer/certificates.go b/internal/pgbouncer/certificates.go index 31f91c503a..c41169f7f8 100644 --- a/internal/pgbouncer/certificates.go +++ b/internal/pgbouncer/certificates.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbouncer/certificates_test.go b/internal/pgbouncer/certificates_test.go index 5955c3de9c..b220b5f922 100644 --- a/internal/pgbouncer/certificates_test.go +++ b/internal/pgbouncer/certificates_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
+// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbouncer/config.go b/internal/pgbouncer/config.go index a203144817..c77ac793c3 100644 --- a/internal/pgbouncer/config.go +++ b/internal/pgbouncer/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbouncer/config.md b/internal/pgbouncer/config.md index abfec12518..7b9f3eeefa 100644 --- a/internal/pgbouncer/config.md +++ b/internal/pgbouncer/config.md @@ -1,5 +1,5 @@ diff --git a/internal/pgbouncer/config_test.go b/internal/pgbouncer/config_test.go index 7a96da571c..f19a37d992 100644 --- a/internal/pgbouncer/config_test.go +++ b/internal/pgbouncer/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -214,7 +214,7 @@ func TestReloadCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(command[3]), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) } diff --git a/internal/pgbouncer/postgres.go b/internal/pgbouncer/postgres.go index cbc2e29916..39bf5d7458 100644 --- a/internal/pgbouncer/postgres.go +++ b/internal/pgbouncer/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -68,6 +68,10 @@ func DisableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + // Drop the following objects in a transaction. `BEGIN;`, @@ -102,7 +106,7 @@ SELECT pg_catalog.format('DROP OWNED BY %I CASCADE', :'username') // Remove the PgBouncer user now that the objects and other privileges are gone. stdout, stderr, err = exec.ExecInDatabasesFromQuery(ctx, `SELECT pg_catalog.current_database()`, - `SET client_min_messages = WARNING; DROP ROLE IF EXISTS :"username";`, + `SET client_min_messages = WARNING; SET synchronous_commit = LOCAL; DROP ROLE IF EXISTS :"username";`, map[string]string{ "username": postgresqlUser, @@ -130,6 +134,10 @@ func EnableInPostgreSQL( // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + // Create the following objects in a transaction so that permissions // are correct before any other session sees them. // - https://www.postgresql.org/docs/current/ddl-priv.html @@ -175,7 +183,7 @@ REVOKE ALL PRIVILEGES // - https://www.postgresql.org/docs/current/perm-functions.html `ALTER ROLE :"username" SET search_path TO :'namespace';`, - // Allow the PgBouncer user to to login. + // Allow the PgBouncer user to login. `ALTER ROLE :"username" LOGIN PASSWORD :'verifier';`, // Commit (finish) the transaction. 
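A note on the recurring `SET synchronous_commit = LOCAL;` lines added above: when a cluster enforces synchronous replication and no standby can acknowledge, an ordinary commit blocks, so this setup and teardown SQL could hang. Scoping the setting to the session lets these statements commit after the local WAL flush without changing the cluster-wide default. Below is a minimal sketch of the same pattern from Go; the connection string is a placeholder and the pgx driver is an illustrative choice, not what the operator uses (it shells out to psql).

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/jackc/pgx/v5/stdlib" // driver choice is illustrative
)

func main() {
	ctx := context.Background()

	// Placeholder connection string.
	db, err := sql.Open("pgx", "postgres://postgres@localhost:5432/postgres")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Pin one connection so the SET applies to the statements that follow it.
	conn, err := db.Conn(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Session-local: commits return after the local WAL flush instead of
	// waiting for synchronous standbys. The changes still replicate; the
	// commit just does not block on confirmation.
	if _, err := conn.ExecContext(ctx, `SET synchronous_commit = LOCAL`); err != nil {
		log.Fatal(err)
	}

	// Example maintenance statement like the ones the operator runs.
	if _, err := conn.ExecContext(ctx, `DROP ROLE IF EXISTS "_crunchypgbouncer"`); err != nil {
		log.Fatal(err)
	}
}
```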
@@ -210,14 +218,14 @@ func generatePassword() (plaintext, verifier string, err error) { return } -func postgresqlHBAs() []postgres.HostBasedAuthentication { +func postgresqlHBAs() []*postgres.HostBasedAuthentication { // PgBouncer must connect over TLS using a SCRAM password. Other network // connections are forbidden. // - https://www.postgresql.org/docs/current/auth-pg-hba-conf.html // - https://www.postgresql.org/docs/current/auth-password.html - return []postgres.HostBasedAuthentication{ - *postgres.NewHBA().User(postgresqlUser).TLS().Method("scram-sha-256"), - *postgres.NewHBA().User(postgresqlUser).TCP().Method("reject"), + return []*postgres.HostBasedAuthentication{ + postgres.NewHBA().User(postgresqlUser).TLS().Method("scram-sha-256"), + postgres.NewHBA().User(postgresqlUser).TCP().Method("reject"), } } diff --git a/internal/pgbouncer/postgres_test.go b/internal/pgbouncer/postgres_test.go index f2ce419753..59b0a37456 100644 --- a/internal/pgbouncer/postgres_test.go +++ b/internal/pgbouncer/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -49,6 +49,7 @@ func TestDisableInPostgreSQL(t *testing.T) { assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` SET client_min_messages = WARNING; +SET synchronous_commit = LOCAL; BEGIN; DROP FUNCTION IF EXISTS :"namespace".get_auth(username TEXT); DROP SCHEMA IF EXISTS :"namespace" CASCADE; @@ -90,7 +91,7 @@ COMMIT;`)) b, err := io.ReadAll(stdin) assert.NilError(t, err) - assert.Equal(t, string(b), `SET client_min_messages = WARNING; DROP ROLE IF EXISTS :"username";`) + assert.Equal(t, string(b), `SET client_min_messages = WARNING; SET synchronous_commit = LOCAL; DROP ROLE IF EXISTS :"username";`) gomega.NewWithT(t).Expect(command).To(gomega.ContainElements( `--set=username=_crunchypgbouncer`, ), "expected query parameters") @@ -135,6 +136,7 @@ func TestEnableInPostgreSQL(t *testing.T) { assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` SET client_min_messages = WARNING; +SET synchronous_commit = LOCAL; BEGIN; SELECT pg_catalog.format('CREATE ROLE %I NOLOGIN', :'username') WHERE NOT EXISTS (SELECT 1 FROM pg_catalog.pg_roles WHERE rolname = :'username') diff --git a/internal/pgbouncer/reconcile.go b/internal/pgbouncer/reconcile.go index 999d6524a5..ad4f16bb08 100644 --- a/internal/pgbouncer/reconcile.go +++ b/internal/pgbouncer/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgbouncer/reconcile_test.go b/internal/pgbouncer/reconcile_test.go index a53de8cf64..c5d31bc608 100644 --- a/internal/pgbouncer/reconcile_test.go +++ b/internal/pgbouncer/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -491,6 +491,6 @@ func TestPostgreSQL(t *testing.T) { Mandatory: postgresqlHBAs(), }, // postgres.HostBasedAuthentication has unexported fields. Call String() to compare. 
- gocmp.Transformer("", postgres.HostBasedAuthentication.String)) + gocmp.Transformer("", (*postgres.HostBasedAuthentication).String)) }) } diff --git a/internal/pgmonitor/exporter.go b/internal/pgmonitor/exporter.go index 9d7a1fc3c6..c8422fcc2c 100644 --- a/internal/pgmonitor/exporter.go +++ b/internal/pgmonitor/exporter.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgmonitor/exporter_test.go b/internal/pgmonitor/exporter_test.go index 5ba14e0993..486b658dab 100644 --- a/internal/pgmonitor/exporter_test.go +++ b/internal/pgmonitor/exporter_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgmonitor/postgres.go b/internal/pgmonitor/postgres.go index 8aed164a18..292d116e30 100644 --- a/internal/pgmonitor/postgres.go +++ b/internal/pgmonitor/postgres.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -26,9 +26,9 @@ func PostgreSQLHBAs(inCluster *v1beta1.PostgresCluster, outHBAs *postgres.HBAs) if ExporterEnabled(inCluster) { // Limit the monitoring user to local connections using SCRAM. outHBAs.Mandatory = append(outHBAs.Mandatory, - *postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("127.0.0.0/8"), - *postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("::1/128"), - *postgres.NewHBA().TCP().User(MonitoringUser).Method("reject")) + postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("127.0.0.0/8"), + postgres.NewHBA().TCP().User(MonitoringUser).Method("scram-sha-256").Network("::1/128"), + postgres.NewHBA().TCP().User(MonitoringUser).Method("reject")) } } @@ -79,6 +79,10 @@ func EnableExporterInPostgreSQL(ctx context.Context, exec postgres.Executor, // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + // Exporter expects that extension(s) to be installed in all databases // pg_stat_statements: https://access.crunchydata.com/documentation/pgmonitor/latest/exporter/ "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;", @@ -103,6 +107,10 @@ func EnableExporterInPostgreSQL(ctx context.Context, exec postgres.Executor, // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + // Setup.sql file from the exporter image. sql is specific // to the PostgreSQL version setup, diff --git a/internal/pgmonitor/postgres_test.go b/internal/pgmonitor/postgres_test.go index 655fa936ae..b91e9ba125 100644 --- a/internal/pgmonitor/postgres_test.go +++ b/internal/pgmonitor/postgres_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgmonitor/util.go b/internal/pgmonitor/util.go index f5606ccd08..8c89815829 100644 --- a/internal/pgmonitor/util.go +++ b/internal/pgmonitor/util.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pgmonitor/util_test.go b/internal/pgmonitor/util_test.go index 8d16d74bae..30d28b45d7 100644 --- a/internal/pgmonitor/util_test.go +++ b/internal/pgmonitor/util_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pki/common.go b/internal/pki/common.go index fbe9421f8b..9075931289 100644 --- a/internal/pki/common.go +++ b/internal/pki/common.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pki/doc.go b/internal/pki/doc.go index 71f8c0a1bc..8bd238e904 100644 --- a/internal/pki/doc.go +++ b/internal/pki/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pki/encoding.go b/internal/pki/encoding.go index 2d2cd851e3..f3d45ffc70 100644 --- a/internal/pki/encoding.go +++ b/internal/pki/encoding.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pki/encoding_test.go b/internal/pki/encoding_test.go index cdf7c0de5a..eb2b1365b3 100644 --- a/internal/pki/encoding_test.go +++ b/internal/pki/encoding_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -81,7 +81,7 @@ func TestCertificateTextMarshaling(t *testing.T) { assert.NilError(t, os.WriteFile(certFile, certBytes, 0o600)) // The "openssl x509" command parses X.509 certificates. - cmd := exec.Command(openssl, "x509", + cmd := exec.CommandContext(t.Context(), openssl, "x509", "-in", certFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() @@ -153,7 +153,7 @@ func TestPrivateKeyTextMarshaling(t *testing.T) { assert.NilError(t, os.WriteFile(keyFile, keyBytes, 0o600)) // The "openssl pkey" command processes public and private keys. 
- cmd := exec.Command(openssl, "pkey", + cmd := exec.CommandContext(t.Context(), openssl, "pkey", "-in", keyFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() @@ -164,12 +164,12 @@ func TestPrivateKeyTextMarshaling(t *testing.T) { "expected valid private key, got:\n%s", output) t.Run("Check", func(t *testing.T) { - output, _ := exec.Command(openssl, "pkey", "-help").CombinedOutput() + output, _ := exec.CommandContext(t.Context(), openssl, "pkey", "-help").CombinedOutput() if !strings.Contains(string(output), "-check") { t.Skip(`requires "-check" flag`) } - cmd := exec.Command(openssl, "pkey", + cmd := exec.CommandContext(t.Context(), openssl, "pkey", "-check", "-in", keyFile, "-inform", "PEM", "-noout", "-text") output, err := cmd.CombinedOutput() diff --git a/internal/pki/pki.go b/internal/pki/pki.go index 7048810654..80f16fb2e5 100644 --- a/internal/pki/pki.go +++ b/internal/pki/pki.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/pki/pki_test.go b/internal/pki/pki_test.go index cd13896450..fa8f290475 100644 --- a/internal/pki/pki_test.go +++ b/internal/pki/pki_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -194,7 +194,7 @@ func TestRootIsInvalid(t *testing.T) { t.Cleanup(func() { currentTime = original }) currentTime = func() time.Time { - return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC) } root, err := NewRootCertificateAuthority() @@ -395,7 +395,7 @@ func TestLeafIsInvalid(t *testing.T) { t.Cleanup(func() { currentTime = original }) currentTime = func() time.Time { - return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.Local) + return time.Date(2010, time.January, 1, 0, 0, 0, 0, time.UTC) } leaf, err := root.GenerateLeafCertificate("", nil) @@ -439,7 +439,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { verify := func(t testing.TB, args ...string) { t.Helper() // #nosec G204 -- args from this test - cmd := exec.Command(openssl, append([]string{"verify"}, args...)...) + cmd := exec.CommandContext(t.Context(), openssl, append([]string{"verify"}, args...)...) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) @@ -476,7 +476,7 @@ func basicOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { } func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { - output, _ := exec.Command(openssl, "verify", "-help").CombinedOutput() + output, _ := exec.CommandContext(t.Context(), openssl, "verify", "-help").CombinedOutput() if !strings.Contains(string(output), "-x509_strict") { t.Skip(`requires "-x509_strict" flag`) } @@ -487,7 +487,7 @@ func strictOpenSSLVerify(t *testing.T, openssl string, root, leaf Certificate) { verify := func(t testing.TB, args ...string) { t.Helper() // #nosec G204 -- args from this test - cmd := exec.Command(openssl, append([]string{"verify", + cmd := exec.CommandContext(t.Context(), openssl, append([]string{"verify", // Do not use the default trusted CAs. "-no-CAfile", "-no-CApath", // Disable "non-compliant workarounds for broken certificates". 
diff --git a/internal/postgis/postgis.go b/internal/postgis/postgis.go index f54da0dd93..5a90c7afe2 100644 --- a/internal/postgis/postgis.go +++ b/internal/postgis/postgis.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -26,6 +26,10 @@ func EnableInPostgreSQL(ctx context.Context, exec postgres.Executor) error { // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + `CREATE EXTENSION IF NOT EXISTS postgis;`, `CREATE EXTENSION IF NOT EXISTS postgis_topology;`, `CREATE EXTENSION IF NOT EXISTS fuzzystrmatch;`, diff --git a/internal/postgis/postgis_test.go b/internal/postgis/postgis_test.go index 5f604abc90..7e83c840e9 100644 --- a/internal/postgis/postgis_test.go +++ b/internal/postgis/postgis_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -29,6 +29,7 @@ func TestEnableInPostgreSQL(t *testing.T) { b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), `SET client_min_messages = WARNING; +SET synchronous_commit = LOCAL; CREATE EXTENSION IF NOT EXISTS postgis; CREATE EXTENSION IF NOT EXISTS postgis_topology; CREATE EXTENSION IF NOT EXISTS fuzzystrmatch; diff --git a/internal/postgres/config.go b/internal/postgres/config.go index ce1acde3fb..cf6792cfa9 100644 --- a/internal/postgres/config.go +++ b/internal/postgres/config.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/config_test.go b/internal/postgres/config_test.go index cd4c92d185..ef24f202c3 100644 --- a/internal/postgres/config_test.go +++ b/internal/postgres/config_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -53,7 +53,7 @@ func TestWALDirectory(t *testing.T) { func TestBashHalt(t *testing.T) { t.Run("NoPipeline", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; halt ab cd e`) var exit *exec.ExitError @@ -65,7 +65,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("PipelineZeroStatus", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; true && halt message`) var exit *exec.ExitError @@ -77,7 +77,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("PipelineNonZeroStatus", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (exit 99) || halt $'multi\nline'`) var exit *exec.ExitError @@ -89,7 +89,7 @@ func TestBashHalt(t *testing.T) { }) t.Run("Subshell", func(t *testing.T) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashHalt+`; (halt 'err') || echo 'after'`) stderr := new(bytes.Buffer) @@ -105,7 +105,7 @@ func TestBashHalt(t *testing.T) { func TestBashPermissions(t *testing.T) { // macOS `stat` takes different arguments than BusyBox and GNU coreutils. - if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "stat", "--help").CombinedOutput(); err != nil { t.Skip(`requires "stat" executable`) } else if !strings.Contains(string(output), "%A") { t.Skip(`requires "stat" with access format sequence`) @@ -117,7 +117,7 @@ func TestBashPermissions(t *testing.T) { assert.NilError(t, os.WriteFile(filepath.Join(dir, "sub", "fn"), nil, 0o624)) // #nosec G306 OK permissions for a temp dir in a test assert.NilError(t, os.Chmod(filepath.Join(dir, "sub", "fn"), 0o624)) - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-c", "--", bashPermissions+`; permissions "$@"`, "-", filepath.Join(dir, "sub", "fn")) @@ -132,7 +132,7 @@ func TestBashPermissions(t *testing.T) { func TestBashRecreateDirectory(t *testing.T) { // macOS `stat` takes different arguments than BusyBox and GNU coreutils. - if output, err := exec.Command("stat", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "stat", "--help").CombinedOutput(); err != nil { t.Skip(`requires "stat" executable`) } else if !strings.Contains(string(output), "%a") { t.Skip(`requires "stat" with access format sequence`) @@ -144,7 +144,7 @@ func TestBashRecreateDirectory(t *testing.T) { assert.NilError(t, os.WriteFile(filepath.Join(dir, "d", "file"), nil, 0o644)) // #nosec G306 OK permissions for a temp dir in a test stat := func(args ...string) string { - cmd := exec.Command("stat", "-c", "%i %#a %N") + cmd := exec.CommandContext(t.Context(), "stat", "-c", "%i %#a %N") cmd.Args = append(cmd.Args, args...) 
out, err := cmd.CombinedOutput() @@ -161,7 +161,7 @@ func TestBashRecreateDirectory(t *testing.T) { filepath.Join(dir, "d", "file"), ) - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-ceu", "--", bashRecreateDirectory+` recreate "$@"`, "-", filepath.Join(dir, "d"), "0740") @@ -200,7 +200,7 @@ func TestBashRecreateDirectory(t *testing.T) { func TestBashSafeLink(t *testing.T) { // macOS `mv` takes different arguments than GNU coreutils. - if output, err := exec.Command("mv", "--help").CombinedOutput(); err != nil { + if output, err := exec.CommandContext(t.Context(), "mv", "--help").CombinedOutput(); err != nil { t.Skip(`requires "mv" executable`) } else if !strings.Contains(string(output), "no-target-directory") { t.Skip(`requires "mv" that overwrites a directory symlink`) @@ -208,7 +208,7 @@ func TestBashSafeLink(t *testing.T) { // execute calls the bash function with args. execute := func(args ...string) (string, error) { - cmd := exec.Command("bash") + cmd := exec.CommandContext(t.Context(), "bash") cmd.Args = append(cmd.Args, "-ceu", "--", bashSafeLink+`safelink "$@"`, "-") cmd.Args = append(cmd.Args, args...) output, err := cmd.CombinedOutput() @@ -475,7 +475,7 @@ func TestStartupCommand(t *testing.T) { assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. - cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(ctx, shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/postgres/databases.go b/internal/postgres/databases.go index 0d70170527..92d07a9ee8 100644 --- a/internal/postgres/databases.go +++ b/internal/postgres/databases.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/databases_test.go b/internal/postgres/databases_test.go index e025e86788..374bb450d0 100644 --- a/internal/postgres/databases_test.go +++ b/internal/postgres/databases_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/doc.go b/internal/postgres/doc.go index bd616b5916..c03bbc315b 100644 --- a/internal/postgres/doc.go +++ b/internal/postgres/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/exec.go b/internal/postgres/exec.go index a846a8aa57..3042fdf828 100644 --- a/internal/postgres/exec.go +++ b/internal/postgres/exec.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/exec_test.go b/internal/postgres/exec_test.go index df9b862577..3ec94717d5 100644 --- a/internal/postgres/exec_test.go +++ b/internal/postgres/exec_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -184,7 +184,7 @@ done <<< "${databases}" assert.NilError(t, os.WriteFile(file, []byte(script), 0o600)) // Expect shellcheck to be happy. 
- cmd := exec.Command(shellcheck, "--enable=all", file) + cmd := exec.CommandContext(t.Context(), shellcheck, "--enable=all", file) output, err := cmd.CombinedOutput() assert.NilError(t, err, "%q\n%s", cmd.Args, output) diff --git a/internal/postgres/hba.go b/internal/postgres/hba.go index d9b5ce2680..3163b3307b 100644 --- a/internal/postgres/hba.go +++ b/internal/postgres/hba.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -12,31 +12,31 @@ import ( // NewHBAs returns HostBasedAuthentication records required by this package. func NewHBAs() HBAs { return HBAs{ - Mandatory: []HostBasedAuthentication{ + Mandatory: []*HostBasedAuthentication{ // The "postgres" superuser must always be able to connect locally. - *NewHBA().Local().User("postgres").Method("peer"), + NewHBA().Local().User("postgres").Method("peer"), // The replication user must always connect over TLS using certificate // authentication. Patroni also connects to the "postgres" database // when calling `pg_rewind`. // - https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-AUTHENTICATION - *NewHBA().TLS().User(ReplicationUser).Method("cert").Replication(), - *NewHBA().TLS().User(ReplicationUser).Method("cert").Database("postgres"), - *NewHBA().TCP().User(ReplicationUser).Method("reject"), + NewHBA().TLS().User(ReplicationUser).Method("cert").Replication(), + NewHBA().TLS().User(ReplicationUser).Method("cert").Database("postgres"), + NewHBA().TCP().User(ReplicationUser).Method("reject"), }, - Default: []HostBasedAuthentication{ + Default: []*HostBasedAuthentication{ // Allow TLS connections to any database using passwords. The "md5" // authentication method automatically verifies passwords encrypted // using either MD5 or SCRAM-SHA-256. // - https://www.postgresql.org/docs/current/auth-password.html - *NewHBA().TLS().Method("md5"), + NewHBA().TLS().Method("md5"), }, } } // HBAs is a pairing of HostBasedAuthentication records. -type HBAs struct{ Mandatory, Default []HostBasedAuthentication } +type HBAs struct{ Mandatory, Default []*HostBasedAuthentication } // HostBasedAuthentication represents a single record for pg_hba.conf. // - https://www.postgresql.org/docs/current/auth-pg-hba-conf.html @@ -49,7 +49,7 @@ func NewHBA() *HostBasedAuthentication { return new(HostBasedAuthentication).AllDatabases().AllNetworks().AllUsers() } -func (HostBasedAuthentication) quote(value string) string { +func (*HostBasedAuthentication) quote(value string) string { return `"` + strings.ReplaceAll(value, `"`, `""`) + `"` } @@ -148,7 +148,7 @@ func (hba *HostBasedAuthentication) User(name string) *HostBasedAuthentication { } // String returns hba formatted for the pg_hba.conf file without a newline. -func (hba HostBasedAuthentication) String() string { +func (hba *HostBasedAuthentication) String() string { if hba.origin == "local" { return strings.TrimSpace(fmt.Sprintf("local %s %s %s %s", hba.database, hba.user, hba.method, hba.options)) diff --git a/internal/postgres/hba_test.go b/internal/postgres/hba_test.go index 9744479fdd..7457b7f649 100644 --- a/internal/postgres/hba_test.go +++ b/internal/postgres/hba_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -14,7 +14,7 @@ import ( ) func TestNewHBAs(t *testing.T) { - matches := func(actual []HostBasedAuthentication, expected string) cmp.Comparison { + matches := func(actual []*HostBasedAuthentication, expected string) cmp.Comparison { printed := make([]string, len(actual)) for i := range actual { printed[i] = actual[i].String() diff --git a/internal/postgres/huge_pages.go b/internal/postgres/huge_pages.go index ee13c0d11b..b38120bafd 100644 --- a/internal/postgres/huge_pages.go +++ b/internal/postgres/huge_pages.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/huge_pages_test.go b/internal/postgres/huge_pages_test.go index 58a6a6aa57..9b9f12172f 100644 --- a/internal/postgres/huge_pages_test.go +++ b/internal/postgres/huge_pages_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/iana.go b/internal/postgres/iana.go index 4392b549f1..395ee1d1e8 100644 --- a/internal/postgres/iana.go +++ b/internal/postgres/iana.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/parameters.go b/internal/postgres/parameters.go index 434d9fd1dd..934bc67b28 100644 --- a/internal/postgres/parameters.go +++ b/internal/postgres/parameters.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -65,7 +65,7 @@ func NewParameterSet() *ParameterSet { } // AsMap returns a copy of ps as a map. -func (ps ParameterSet) AsMap() map[string]string { +func (ps *ParameterSet) AsMap() map[string]string { out := make(map[string]string, len(ps.values)) for name, value := range ps.values { out[name] = value @@ -102,25 +102,25 @@ func (ps *ParameterSet) AppendToList(name string, value ...string) { } // Get returns the value of parameter name and whether or not it was present in ps. -func (ps ParameterSet) Get(name string) (string, bool) { +func (ps *ParameterSet) Get(name string) (string, bool) { value, ok := ps.values[ps.normalize(name)] return value, ok } // Has returns whether or not parameter name is present in ps. -func (ps ParameterSet) Has(name string) bool { +func (ps *ParameterSet) Has(name string) bool { _, ok := ps.Get(name) return ok } -func (ParameterSet) normalize(name string) string { +func (*ParameterSet) normalize(name string) string { // All parameter names are case-insensitive. // -- https://www.postgresql.org/docs/current/config-setting.html return strings.ToLower(name) } // Value returns empty string or the value of parameter name if it is present in ps. -func (ps ParameterSet) Value(name string) string { +func (ps *ParameterSet) Value(name string) string { value, _ := ps.Get(name) return value } diff --git a/internal/postgres/parameters_test.go b/internal/postgres/parameters_test.go index c6228d7958..f5497cb672 100644 --- a/internal/postgres/parameters_test.go +++ b/internal/postgres/parameters_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/password/doc.go b/internal/postgres/password/doc.go index eef7ed7db2..f3572a4588 100644 --- a/internal/postgres/password/doc.go +++ b/internal/postgres/password/doc.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/password/md5.go b/internal/postgres/password/md5.go index 884dfb655e..55cc43f5cb 100644 --- a/internal/postgres/password/md5.go +++ b/internal/postgres/password/md5.go @@ -1,11 +1,10 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 package password import ( - // #nosec G501 "crypto/md5" "errors" diff --git a/internal/postgres/password/md5_test.go b/internal/postgres/password/md5_test.go index 80cb7742d6..d6f564eab8 100644 --- a/internal/postgres/password/md5_test.go +++ b/internal/postgres/password/md5_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/password/password.go b/internal/postgres/password/password.go index 337282cc74..5f5894f535 100644 --- a/internal/postgres/password/password.go +++ b/internal/postgres/password/password.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/password/password_test.go b/internal/postgres/password/password_test.go index 3401dec4ac..bc4ddc883d 100644 --- a/internal/postgres/password/password_test.go +++ b/internal/postgres/password/password_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/password/scram.go b/internal/postgres/password/scram.go index 8264cd87a0..90eb2a54ad 100644 --- a/internal/postgres/password/scram.go +++ b/internal/postgres/password/scram.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -138,7 +138,7 @@ func (s *SCRAMPassword) isASCII() bool { // iterate through each character of the plaintext password and determine if // it is ASCII. if it is not ASCII, exit early // per research, this loop is optimized to be fast for searching - for i := 0; i < len(s.password); i++ { + for i := range len(s.password) { if s.password[i] > unicode.MaxASCII { return false } diff --git a/internal/postgres/password/scram_test.go b/internal/postgres/password/scram_test.go index 0552e519b7..2c5cd2089d 100644 --- a/internal/postgres/password/scram_test.go +++ b/internal/postgres/password/scram_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index 344f91dd9f..3a98c502c5 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index 138b5c7b3e..94a5a99aba 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/postgres/users.go b/internal/postgres/users.go index be8785a4e5..ab78b82c73 100644 --- a/internal/postgres/users.go +++ b/internal/postgres/users.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -66,6 +66,10 @@ func WriteUsersInPostgreSQL( var err error var sql bytes.Buffer + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + _, _ = sql.WriteString(`SET synchronous_commit = LOCAL;`) + // Prevent unexpected dereferences by emptying "search_path". The "pg_catalog" // schema is still searched, and only temporary objects can be created. // - https://www.postgresql.org/docs/current/runtime-config-client.html#GUC-SEARCH-PATH @@ -216,6 +220,10 @@ func WriteUsersSchemasInPostgreSQL(ctx context.Context, exec Executor, // - https://www.postgresql.org/docs/current/runtime-config-client.html `SET client_min_messages = WARNING;`, + // Do not wait for changes to be replicated. [Since PostgreSQL v9.1] + // - https://www.postgresql.org/docs/current/runtime-config-wal.html + `SET synchronous_commit = LOCAL;`, + // Creates a schema named after and owned by the user // - https://www.postgresql.org/docs/current/ddl-schemas.html // - https://www.postgresql.org/docs/current/sql-createschema.html diff --git a/internal/postgres/users_test.go b/internal/postgres/users_test.go index 141175c78e..57587a3b11 100644 --- a/internal/postgres/users_test.go +++ b/internal/postgres/users_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -63,7 +63,7 @@ func TestWriteUsersInPostgreSQL(t *testing.T) { b, err := io.ReadAll(stdin) assert.NilError(t, err) assert.Equal(t, string(b), strings.TrimSpace(` -SET search_path TO ''; +SET synchronous_commit = LOCAL;SET search_path TO ''; CREATE TEMPORARY TABLE input (id serial, data json); \copy input (data) from stdin with (format text) \. diff --git a/internal/postgres/versions.go b/internal/postgres/versions.go new file mode 100644 index 0000000000..17d067966d --- /dev/null +++ b/internal/postgres/versions.go @@ -0,0 +1,26 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import "time" + +// https://www.postgresql.org/support/versioning +var finalReleaseDates = map[int]time.Time{ + 10: time.Date(2022, time.November+1, 10, 0, 0, 0, 0, time.UTC), + 11: time.Date(2023, time.November+1, +9, 0, 0, 0, 0, time.UTC), + 12: time.Date(2024, time.November+1, 14, 0, 0, 0, 0, time.UTC), + 13: time.Date(2025, time.November+1, 13, 0, 0, 0, 0, time.UTC), + 14: time.Date(2026, time.November+1, 12, 0, 0, 0, 0, time.UTC), + 15: time.Date(2027, time.November+1, 11, 0, 0, 0, 0, time.UTC), + 16: time.Date(2028, time.November+1, +9, 0, 0, 0, 0, time.UTC), + 17: time.Date(2029, time.November+1, +8, 0, 0, 0, 0, time.UTC), +} + +// ReleaseIsFinal returns whether or not t is definitively past the final +// scheduled release of a Postgres version. +func ReleaseIsFinal(majorVersion int, t time.Time) bool { + known, ok := finalReleaseDates[majorVersion] + return ok && t.After(known) +}
diff --git a/internal/postgres/versions_test.go b/internal/postgres/versions_test.go new file mode 100644 index 0000000000..089deef399 --- /dev/null +++ b/internal/postgres/versions_test.go @@ -0,0 +1,34 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package postgres + +import ( + "testing" + "time" + + "gotest.tools/v3/assert" +) + +func TestReleaseIsFinal(t *testing.T) { + // On November 4th, 2024, PG 10 and 11 were EOL and 12-17 were supported. + testDate, err := time.Parse("2006-Jan-02", "2024-Nov-04") + assert.NilError(t, err) + assert.Check(t, ReleaseIsFinal(10, testDate)) + assert.Check(t, ReleaseIsFinal(11, testDate)) + assert.Check(t, !ReleaseIsFinal(12, testDate)) + assert.Check(t, !ReleaseIsFinal(13, testDate)) + assert.Check(t, !ReleaseIsFinal(14, testDate)) + assert.Check(t, !ReleaseIsFinal(15, testDate)) + assert.Check(t, !ReleaseIsFinal(16, testDate)) + assert.Check(t, !ReleaseIsFinal(17, testDate)) + + // On December 15th, 2024, we alert that PG 12 is EOL. + testDate = testDate.AddDate(0, 1, 11) + assert.Check(t, ReleaseIsFinal(12, testDate)) + + // ReleaseIsFinal covers PG versions 10 and greater. Any version not covered + // by the finalReleaseDates map in ReleaseIsFinal returns false. + assert.Check(t, !ReleaseIsFinal(1, testDate)) +}
diff --git a/internal/postgres/wal.md b/internal/postgres/wal.md index afb094c20e..b88185aa21 100644 --- a/internal/postgres/wal.md +++ b/internal/postgres/wal.md @@ -1,5 +1,5 @@ diff --git a/internal/registration/interface.go b/internal/registration/interface.go deleted file mode 100644 index 578a064e2b..0000000000 --- a/internal/registration/interface.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "fmt" - "os" - - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -type Registration interface { - // Required returns true when registration is required but the token is missing or invalid.
- Required(record.EventRecorder, client.Object, *[]metav1.Condition) bool -} - -var URL = os.Getenv("REGISTRATION_URL") - -func SetAdvanceWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { - recorder.Eventf(object, corev1.EventTypeWarning, "Register Soon", - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. See %s for details.", URL) - - meta.SetStatusCondition(conditions, metav1.Condition{ - Type: v1beta1.Registered, - Status: metav1.ConditionFalse, - Reason: "TokenRequired", - Message: fmt.Sprintf( - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. See %s for details.", URL), - ObservedGeneration: object.GetGeneration(), - }) -} - -func SetRequiredWarning(recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition) { - recorder.Eventf(object, corev1.EventTypeWarning, "Registration Required", - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Register now to be ready for your next upgrade. See %s for details.", URL) - - meta.SetStatusCondition(conditions, metav1.Condition{ - Type: v1beta1.Registered, - Status: metav1.ConditionFalse, - Reason: "TokenRequired", - Message: fmt.Sprintf( - "Crunchy Postgres for Kubernetes requires registration for upgrades."+ - " Upgrade suspended. See %s for details.", URL), - ObservedGeneration: object.GetGeneration(), - }) -} - -func emitFailedWarning(recorder record.EventRecorder, object client.Object) { - recorder.Eventf(object, corev1.EventTypeWarning, "Token Authentication Failed", - "See %s for details.", URL) -} - -func emitVerifiedEvent(recorder record.EventRecorder, object client.Object) { - recorder.Event(object, corev1.EventTypeNormal, "Token Verified", - "Thank you for registering your installation of Crunchy Postgres for Kubernetes.") -} diff --git a/internal/registration/runner.go b/internal/registration/runner.go deleted file mode 100644 index 0d607e1e94..0000000000 --- a/internal/registration/runner.go +++ /dev/null @@ -1,191 +0,0 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "context" - "crypto/rsa" - "errors" - "os" - "strings" - "sync" - "time" - - "github.com/golang-jwt/jwt/v5" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -// Runner implements [Registration] by loading and validating the token at a -// fixed path. Its methods are safe to call concurrently. -type Runner struct { - changed func() - enabled bool - publicKey *rsa.PublicKey - refresh time.Duration - tokenPath string - - token struct { - sync.RWMutex - Exists bool `json:"-"` - - jwt.RegisteredClaims - Iteration int `json:"itr"` - } -} - -// Runner implements [Registration] and [manager.Runnable]. -var _ Registration = (*Runner)(nil) -var _ manager.Runnable = (*Runner)(nil) - -// NewRunner creates a [Runner] that periodically checks the validity of the -// token at tokenPath. It calls changed when the validity of the token changes. 
-func NewRunner(publicKey, tokenPath string, changed func()) (*Runner, error) { - runner := &Runner{ - changed: changed, - refresh: time.Minute, - tokenPath: tokenPath, - } - - var err error - switch { - case publicKey != "" && tokenPath != "": - if !strings.HasPrefix(strings.TrimSpace(publicKey), "-") { - publicKey = "-----BEGIN -----\n" + publicKey + "\n-----END -----" - } - - runner.enabled = true - runner.publicKey, err = jwt.ParseRSAPublicKeyFromPEM([]byte(publicKey)) - - case publicKey == "" && tokenPath != "": - err = errors.New("registration: missing public key") - - case publicKey != "" && tokenPath == "": - err = errors.New("registration: missing token path") - } - - return runner, err -} - -// CheckToken loads and verifies the configured token, returning an error when -// the file exists but cannot be verified, and -// returning the token if it can be verified. -// NOTE(upgradecheck): return the token/nil so that we can use the token -// in upgradecheck; currently a refresh of the token will cause a restart of the pod -// meaning that the token used in upgradecheck is always the current token. -// But if the restart behavior changes, we might drop the token return in main.go -// and change upgradecheck to retrieve the token itself -func (r *Runner) CheckToken() (*jwt.Token, error) { - data, errFile := os.ReadFile(r.tokenPath) - key := func(*jwt.Token) (any, error) { return r.publicKey, nil } - - // Assume [jwt] and [os] functions could do something unexpected; use defer - // to safely write to the token. - r.token.Lock() - defer r.token.Unlock() - - token, errToken := jwt.ParseWithClaims(string(data), &r.token, key, - jwt.WithExpirationRequired(), - jwt.WithValidMethods([]string{"RS256"}), - ) - - // The error from [os.ReadFile] indicates whether a token file exists. - r.token.Exists = !os.IsNotExist(errFile) - - // Reset most claims if there is any problem loading, parsing, validating, or - // verifying the token file. - if errFile != nil || errToken != nil { - r.token.RegisteredClaims = jwt.RegisteredClaims{} - } - - switch { - case !r.enabled || !r.token.Exists: - return nil, nil - case errFile != nil: - return nil, errFile - default: - return token, errToken - } -} - -func (r *Runner) state() (failed, required bool) { - // Assume [time] functions could do something unexpected; use defer to safely - // read the token. - r.token.RLock() - defer r.token.RUnlock() - - failed = r.token.Exists && r.token.ExpiresAt == nil - required = r.enabled && - (!r.token.Exists || failed || r.token.ExpiresAt.Before(time.Now())) - return -} - -// Required returns true when registration is required but the token is missing or invalid. -func (r *Runner) Required( - recorder record.EventRecorder, object client.Object, conditions *[]metav1.Condition, -) bool { - failed, required := r.state() - - if r.enabled && failed { - emitFailedWarning(recorder, object) - } - - if !required && conditions != nil { - before := len(*conditions) - meta.RemoveStatusCondition(conditions, v1beta1.Registered) - meta.RemoveStatusCondition(conditions, "RegistrationRequired") - meta.RemoveStatusCondition(conditions, "TokenRequired") - found := len(*conditions) != before - - if r.enabled && found { - emitVerifiedEvent(recorder, object) - } - } - - return required -} - -// NeedLeaderElection returns true so that r runs only on the single -// [manager.Manager] that is elected leader in the Kubernetes namespace. 
-func (r *Runner) NeedLeaderElection() bool { return true } - -// Start watches for a mounted registration token when enabled. It blocks -// until ctx is cancelled. -func (r *Runner) Start(ctx context.Context) error { - var ticks <-chan time.Time - - if r.enabled { - ticker := time.NewTicker(r.refresh) - defer ticker.Stop() - ticks = ticker.C - } - - log := logging.FromContext(ctx).WithValues("controller", "registration") - - for { - select { - case <-ticks: - _, before := r.state() - if _, err := r.CheckToken(); err != nil { - log.Error(err, "Unable to validate token") - } - if _, after := r.state(); before != after && r.changed != nil { - r.changed() - } - case <-ctx.Done(): - // https://github.com/kubernetes-sigs/controller-runtime/issues/1927 - if errors.Is(ctx.Err(), context.Canceled) { - return nil - } - return ctx.Err() - } - } -} diff --git a/internal/registration/runner_test.go b/internal/registration/runner_test.go deleted file mode 100644 index 8e75848986..0000000000 --- a/internal/registration/runner_test.go +++ /dev/null @@ -1,574 +0,0 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - "context" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/golang-jwt/jwt/v5" - "gotest.tools/v3/assert" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes/scheme" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/testing/events" -) - -func TestNewRunner(t *testing.T) { - t.Parallel() - - key, err := rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - der, err := x509.MarshalPKIXPublicKey(&key.PublicKey) - assert.NilError(t, err) - - public := pem.EncodeToMemory(&pem.Block{Bytes: der}) - assert.Assert(t, len(public) != 0) - - t.Run("Disabled", func(t *testing.T) { - runner, err := NewRunner("", "", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, !runner.enabled) - }) - - t.Run("ConfiguredCorrectly", func(t *testing.T) { - runner, err := NewRunner(string(public), "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - - t.Run("ExtraLines", func(t *testing.T) { - input := "\n\n" + strings.ReplaceAll(string(public), "\n", "\n\n") + "\n\n" - - runner, err := NewRunner(input, "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - }) - - t.Run("WithoutPEMBoundaries", func(t *testing.T) { - lines := strings.Split(strings.TrimSpace(string(public)), "\n") - lines = lines[1 : len(lines)-1] - - for _, input := range []string{ - strings.Join(lines, ""), // single line - strings.Join(lines, "\n"), // multi-line - "\n\n" + strings.Join(lines, "\n\n") + "\n\n", // extra lines - } { - runner, err := NewRunner(input, "any", nil) - assert.NilError(t, err) - assert.Assert(t, runner != nil) - assert.Assert(t, runner.enabled) - } - }) - }) - - t.Run("ConfiguredIncorrectly", func(t *testing.T) { - for _, tt := range []struct { - key, path, msg string - }{ - {msg: "public key", key: "", path: "any"}, - {msg: "token path", key: "bad", path: ""}, - {msg: "invalid key", key: "bad", path: "any"}, - {msg: "token path", key: string(public), path: ""}, - } { - _, err := NewRunner(tt.key, tt.path, nil) - assert.ErrorContains(t, err, tt.msg, "(key=%q, path=%q)", tt.key, tt.path) - } - }) -} 
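For orientation while reading the deleted TestRunnerCheckToken below: the removed Runner verified tokens with github.com/golang-jwt/jwt/v5, accepting only RS256 signatures and requiring an unexpired "exp" claim. A self-contained sketch of that verification, condensed from the deleted CheckToken above; the function name is hypothetical:

```go
package registration

import (
	"crypto/rsa"

	"github.com/golang-jwt/jwt/v5"
)

// verifyRegistrationToken mirrors the checks the deleted Runner.CheckToken
// performed: the signature must be RS256, and the "exp" claim must be
// present and in the future.
func verifyRegistrationToken(data string, publicKey *rsa.PublicKey) (*jwt.Token, error) {
	key := func(*jwt.Token) (any, error) { return publicKey, nil }

	return jwt.ParseWithClaims(data, &jwt.RegisteredClaims{}, key,
		jwt.WithExpirationRequired(),
		jwt.WithValidMethods([]string{"RS256"}),
	)
}
```

The WithValidMethods restriction is what the WrongAlgorithm subtest below exercises: a token signed with HS256, treating the RSA public key as an HMAC secret, must be rejected.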
- -func TestRunnerCheckToken(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - key, err := rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - t.Run("SafeToCallDisabled", func(t *testing.T) { - r := Runner{enabled: false} - _, err := r.CheckToken() - assert.NilError(t, err) - }) - - t.Run("FileMissing", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} - _, err := r.CheckToken() - assert.NilError(t, err) - }) - - t.Run("FileUnreadable", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "nope")} - assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o200)) // Writeable - - _, err := r.CheckToken() - assert.ErrorContains(t, err, "permission") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("FileEmpty", func(t *testing.T) { - r := Runner{enabled: true, tokenPath: filepath.Join(dir, "empty")} - assert.NilError(t, os.WriteFile(r.tokenPath, nil, 0o400)) // Readable - - _, err := r.CheckToken() - assert.ErrorContains(t, err, "malformed") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("WrongAlgorithm", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "hs256"), - } - - // Maliciously treating an RSA public key as an HMAC secret. - // - https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/ - public, err := x509.MarshalPKIXPublicKey(r.publicKey) - assert.NilError(t, err) - data, err := jwt.New(jwt.SigningMethodHS256).SignedString(public) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.Assert(t, err != nil, "HMAC algorithm should be rejected") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("MissingExpiration", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "no-claims"), - } - - data, err := jwt.New(jwt.SigningMethodRS256).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.ErrorContains(t, err, "exp claim is required") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("ExpiredToken", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "expired"), - } - - data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": jwt.NewNumericDate(time.Date(2020, 1, 1, 1, 1, 1, 1, time.UTC)), - }).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - _, err = r.CheckToken() - assert.ErrorContains(t, err, "is expired") - assert.Assert(t, r.token.ExpiresAt == nil) - }) - - t.Run("ValidToken", func(t *testing.T) { - r := Runner{ - enabled: true, - publicKey: &key.PublicKey, - tokenPath: filepath.Join(dir, "valid"), - } - - expiration := jwt.NewNumericDate(time.Now().Add(time.Hour)) - data, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": expiration, - }).SignedString(key) - assert.NilError(t, err) - assert.NilError(t, os.WriteFile(r.tokenPath, []byte(data), 0o400)) // Readable - - token, err := r.CheckToken() - assert.NilError(t, err) - assert.Assert(t, r.token.ExpiresAt != nil) - assert.Assert(t, token.Valid) - exp, err := token.Claims.GetExpirationTime() - assert.NilError(t, err) - assert.Equal(t, exp.Time, expiration.Time) - }) -} - -func 
TestRunnerLeaderElectionRunnable(t *testing.T) { - var runner manager.LeaderElectionRunnable = &Runner{} - - assert.Assert(t, runner.NeedLeaderElection()) -} - -func TestRunnerRequiredConditions(t *testing.T) { - t.Parallel() - - t.Run("RegistrationDisabled", func(t *testing.T) { - r := Runner{enabled: false} - - for _, tt := range []struct { - before, after []metav1.Condition - }{ - { - before: []metav1.Condition{}, - after: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - after: []metav1.Condition{}, - }, - } { - for _, exists := range []bool{false, true} { - for _, expires := range []time.Time{ - time.Now().Add(time.Hour), - time.Now().Add(-time.Hour), - } { - r.token.Exists = exists - r.token.ExpiresAt = jwt.NewNumericDate(expires) - - conditions := append([]metav1.Condition{}, tt.before...) - discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.DeepEqual(t, conditions, tt.after) - } - } - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - r := Runner{enabled: true} - - for _, tt := range []struct { - exists bool - expires time.Time - before []metav1.Condition - }{ - { - exists: false, expires: time.Now().Add(time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - exists: false, expires: time.Now().Add(-time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - exists: true, expires: time.Now().Add(-time.Hour), - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - r.token.Exists = tt.exists - r.token.ExpiresAt = jwt.NewNumericDate(tt.expires) - - conditions := append([]metav1.Condition{}, tt.before...) - discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.DeepEqual(t, conditions, tt.before) - } - }) - - t.Run("Registered", func(t *testing.T) { - r := Runner{} - r.token.Exists = true - r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) - - for _, tt := range []struct { - before, after []metav1.Condition - }{ - { - before: []metav1.Condition{}, - after: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - after: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - after: []metav1.Condition{}, - }, - } { - for _, enabled := range []bool{false, true} { - r.enabled = enabled - - conditions := append([]metav1.Condition{}, tt.before...) 
- discard := new(events.Recorder) - object := &corev1.ConfigMap{} - - result := r.Required(discard, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.DeepEqual(t, conditions, tt.after) - } - } - }) -} - -func TestRunnerRequiredEvents(t *testing.T) { - t.Parallel() - - t.Run("RegistrationDisabled", func(t *testing.T) { - r := Runner{enabled: false} - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - for _, exists := range []bool{false, true} { - for _, expires := range []time.Time{ - time.Now().Add(time.Hour), - time.Now().Add(-time.Hour), - } { - r.token.Exists = exists - r.token.ExpiresAt = jwt.NewNumericDate(expires) - - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, scheme.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - } - } - }) - - t.Run("RegistrationRequired", func(t *testing.T) { - r := Runner{enabled: true} - - t.Run("MissingToken", func(t *testing.T) { - r.token.Exists = false - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, scheme.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - }) - - t.Run("InvalidToken", func(t *testing.T) { - r.token.Exists = true - r.token.ExpiresAt = nil - - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, scheme.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, true, "expected registration required") - assert.Equal(t, len(recorder.Events), 1, "expected one event") - assert.Equal(t, recorder.Events[0].Type, "Warning") - assert.Equal(t, recorder.Events[0].Reason, "Token Authentication Failed") - } - }) - }) - - t.Run("Registered", func(t *testing.T) { - r := Runner{} - r.token.Exists = true - r.token.ExpiresAt = jwt.NewNumericDate(time.Now().Add(time.Hour)) - - t.Run("AlwaysRegistered", func(t *testing.T) { - // No prior registration conditions - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{}, - }, - { - before: []metav1.Condition{{Type: "ExistingOther"}}, - }, - } { - for _, enabled := range []bool{false, true} { - r.enabled = enabled - - conditions := append([]metav1.Condition{}, tt.before...) 
- object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, scheme.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 0, "expected no events") - } - } - }) - - t.Run("PreviouslyUnregistered", func(t *testing.T) { - r.enabled = true - - // One or more prior registration conditions - for _, tt := range []struct { - before []metav1.Condition - }{ - { - before: []metav1.Condition{{Type: "Registered"}, {Type: "ExistingOther"}}, - }, - { - before: []metav1.Condition{ - {Type: "Registered"}, - {Type: "ExistingOther"}, - {Type: "RegistrationRequired"}, - }, - }, - { - before: []metav1.Condition{{Type: "TokenRequired"}}, - }, - } { - conditions := append([]metav1.Condition{}, tt.before...) - object := &corev1.ConfigMap{} - recorder := events.NewRecorder(t, scheme.Scheme) - - result := r.Required(recorder, object, &conditions) - - assert.Equal(t, result, false, "expected registration not required") - assert.Equal(t, len(recorder.Events), 1, "expected one event") - assert.Equal(t, recorder.Events[0].Type, "Normal") - assert.Equal(t, recorder.Events[0].Reason, "Token Verified") - } - }) - }) -} - -func TestRunnerStart(t *testing.T) { - t.Parallel() - - dir := t.TempDir() - key, err := rsa.GenerateKey(rand.Reader, 2048) - assert.NilError(t, err) - - token, err := jwt.NewWithClaims(jwt.SigningMethodRS256, jwt.MapClaims{ - "exp": jwt.NewNumericDate(time.Now().Add(time.Hour)), - }).SignedString(key) - assert.NilError(t, err) - - t.Run("DisabledDoesNothing", func(t *testing.T) { - runner := &Runner{ - enabled: false, - refresh: time.Nanosecond, - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - - assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded, - "expected it to block until context is canceled") - }) - - t.Run("WithCallback", func(t *testing.T) { - called := false - runner := &Runner{ - changed: func() { called = true }, - enabled: true, - publicKey: &key.PublicKey, - refresh: time.Second, - tokenPath: filepath.Join(dir, "token"), - } - - // Begin with an invalid token. - assert.NilError(t, os.WriteFile(runner.tokenPath, nil, 0o600)) - _, err = runner.CheckToken() - assert.Assert(t, err != nil) - - // Replace it with a valid token. - assert.NilError(t, os.WriteFile(runner.tokenPath, []byte(token), 0o600)) - - // Run with a timeout that exceeds the refresh interval. - ctx, cancel := context.WithTimeout(context.Background(), runner.refresh*3/2) - defer cancel() - - assert.ErrorIs(t, runner.Start(ctx), context.DeadlineExceeded) - assert.Assert(t, called, "expected a call back") - }) -} diff --git a/internal/registration/testing.go b/internal/registration/testing.go deleted file mode 100644 index 1418f6d2d3..0000000000 --- a/internal/registration/testing.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package registration - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/tools/record" - "sigs.k8s.io/controller-runtime/pkg/client" -) - -// NOTE: This type can go away following https://go.dev/issue/47487. 
- -type RegistrationFunc func(record.EventRecorder, client.Object, *[]metav1.Condition) bool - -func (fn RegistrationFunc) Required(rec record.EventRecorder, obj client.Object, conds *[]metav1.Condition) bool { - return fn(rec, obj, conds) -} - -var _ Registration = RegistrationFunc(nil) diff --git a/internal/testing/cmp/cmp.go b/internal/testing/cmp/cmp.go index 265a598064..8b996b3b96 100644 --- a/internal/testing/cmp/cmp.go +++ b/internal/testing/cmp/cmp.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/testing/events/recorder.go b/internal/testing/events/recorder.go index 23c03a4c40..dad5dccf83 100644 --- a/internal/testing/events/recorder.go +++ b/internal/testing/events/recorder.go @@ -1,4 +1,4 @@ -// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2022 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -89,7 +89,7 @@ func (*Recorder) AnnotatedEventf(object runtime.Object, annotations map[string]s } func (r *Recorder) Event(object runtime.Object, eventtype, reason, message string) { if r.eventf != nil { - r.eventf(object, nil, eventtype, reason, "", message) + r.eventf(object, nil, eventtype, reason, "", "%v", message) } } func (r *Recorder) Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...any) { diff --git a/internal/testing/require/exec.go b/internal/testing/require/exec.go index c182e84996..a9e028c55e 100644 --- a/internal/testing/require/exec.go +++ b/internal/testing/require/exec.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -38,7 +38,7 @@ func executable(name string, args ...string) func(testing.TB) string { t.Helper() once.Do(func() { path, err := exec.LookPath(name) - cmd := exec.Command(path, args...) // #nosec G204 -- args from init() + cmd := exec.CommandContext(t.Context(), path, args...) // #nosec G204 -- args from init() if err != nil { result = func(t testing.TB) string { diff --git a/internal/testing/require/kubernetes.go b/internal/testing/require/kubernetes.go index df21bca058..dc1fb0c73c 100644 --- a/internal/testing/require/kubernetes.go +++ b/internal/testing/require/kubernetes.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/testing/require/parallel.go b/internal/testing/require/parallel.go index 4fbdf42284..6d4f73d45f 100644 --- a/internal/testing/require/parallel.go +++ b/internal/testing/require/parallel.go @@ -1,4 +1,4 @@ -// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2022 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go index e71ff22b2e..d1f1421988 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/testing/validation/postgrescluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
// // SPDX-License-Identifier: Apache-2.0 @@ -63,7 +63,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot contain comments") - //nolint:errorlint // This is a test, and a panic is unlikely. status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 3) @@ -85,7 +84,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot assign password") - //nolint:errorlint // This is a test, and a panic is unlikely. status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 2) @@ -106,7 +104,6 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "should match") - //nolint:errorlint // This is a test, and a panic is unlikely. status := err.(apierrors.APIStatus).Status() assert.Assert(t, status.Details != nil) assert.Equal(t, len(status.Details.Causes), 1) diff --git a/internal/upgradecheck/header.go b/internal/upgradecheck/header.go deleted file mode 100644 index 766de8dd07..0000000000 --- a/internal/upgradecheck/header.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "net/http" - "os" - - googleuuid "github.com/google/uuid" - corev1 "k8s.io/api/core/v1" - apierrors "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/client-go/discovery" - "k8s.io/client-go/rest" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -const ( - clientHeader = "X-Crunchy-Client-Metadata" -) - -var ( - // Using apimachinery's UUID package, so our deployment UUID will be a string - deploymentID string -) - -// Extensible struct for client upgrade data -type clientUpgradeData struct { - BridgeClustersTotal int `json:"bridge_clusters_total"` - BuildSource string `json:"build_source"` - DeploymentID string `json:"deployment_id"` - FeatureGatesEnabled string `json:"feature_gates_enabled"` - IsOpenShift bool `json:"is_open_shift"` - KubernetesEnv string `json:"kubernetes_env"` - PGOClustersTotal int `json:"pgo_clusters_total"` - PGOInstaller string `json:"pgo_installer"` - PGOInstallerOrigin string `json:"pgo_installer_origin"` - PGOVersion string `json:"pgo_version"` - RegistrationToken string `json:"registration_token"` -} - -// generateHeader aggregates data and returns a struct of that data -// If any errors are encountered, it logs those errors and uses the default values -func generateHeader(ctx context.Context, cfg *rest.Config, crClient crclient.Client, - pgoVersion string, isOpenShift bool, registrationToken string) *clientUpgradeData { - - return &clientUpgradeData{ - BridgeClustersTotal: getBridgeClusters(ctx, crClient), - BuildSource: os.Getenv("BUILD_SOURCE"), - DeploymentID: ensureDeploymentID(ctx, crClient), - FeatureGatesEnabled: feature.ShowGates(ctx), - IsOpenShift: isOpenShift, - KubernetesEnv: getServerVersion(ctx, cfg), - PGOClustersTotal: getManagedClusters(ctx, crClient), - PGOInstaller: 
os.Getenv("PGO_INSTALLER"), - PGOInstallerOrigin: os.Getenv("PGO_INSTALLER_ORIGIN"), - PGOVersion: pgoVersion, - RegistrationToken: registrationToken, - } -} - -// ensureDeploymentID checks if the UUID exists in memory or in a ConfigMap -// If no UUID exists, ensureDeploymentID creates one and saves it in memory/as a ConfigMap -// Any errors encountered will be logged and the ID result will be what is in memory -func ensureDeploymentID(ctx context.Context, crClient crclient.Client) string { - // If there is no deploymentID in memory, generate one for possible use - if deploymentID == "" { - deploymentID = string(uuid.NewUUID()) - } - - cm := manageUpgradeCheckConfigMap(ctx, crClient, deploymentID) - - if cm != nil && cm.Data["deployment_id"] != "" { - deploymentID = cm.Data["deployment_id"] - } - - return deploymentID -} - -// manageUpgradeCheckConfigMap ensures a ConfigMap exists with a UUID -// If it doesn't exist, this creates it with the in-memory ID -// If it exists and it has a valid UUID, use that to replace the in-memory ID -// If it exists but the field is blank or mangled, we update the ConfigMap with the in-memory ID -func manageUpgradeCheckConfigMap(ctx context.Context, crClient crclient.Client, - currentID string) *corev1.ConfigMap { - - log := logging.FromContext(ctx) - upgradeCheckConfigMapMetadata := naming.UpgradeCheckConfigMap() - - cm := &corev1.ConfigMap{ - ObjectMeta: upgradeCheckConfigMapMetadata, - Data: map[string]string{"deployment_id": currentID}, - } - cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) - - // If no namespace is set, then log this and skip trying to set the UUID in the ConfigMap - if upgradeCheckConfigMapMetadata.GetNamespace() == "" { - log.V(1).Info("upgrade check issue: namespace not set") - return cm - } - - retrievedCM := &corev1.ConfigMap{} - err := crClient.Get(ctx, naming.AsObjectKey(upgradeCheckConfigMapMetadata), retrievedCM) - - // If we get any error besides IsNotFound, log it, skip any ConfigMap steps, - // and use the in-memory deploymentID - if err != nil && !apierrors.IsNotFound(err) { - log.V(1).Info("upgrade check issue: error retrieving configmap", - "response", err.Error()) - return cm - } - - // If we get a ConfigMap with a "deployment_id", check if that UUID is valid - if retrievedCM.Data["deployment_id"] != "" { - _, parseErr := googleuuid.Parse(retrievedCM.Data["deployment_id"]) - // No error -- the ConfigMap has a valid deploymentID, so use that - if parseErr == nil { - cm.Data["deployment_id"] = retrievedCM.Data["deployment_id"] - } - } - - err = applyConfigMap(ctx, crClient, cm, currentID) - if err != nil { - log.V(1).Info("upgrade check issue: could not apply configmap", - "response", err.Error()) - } - return cm -} - -// applyConfigMap is a focused version of the Reconciler.apply method, -// meant only to work with this ConfigMap -// It sends an apply patch to the Kubernetes API, with the fieldManager set to the deployment_id -// and the force parameter set to true. -// - https://docs.k8s.io/reference/using-api/server-side-apply/#managers -// - https://docs.k8s.io/reference/using-api/server-side-apply/#conflicts -func applyConfigMap(ctx context.Context, crClient crclient.Client, - object crclient.Object, owner string) error { - // Generate an apply-patch by comparing the object to its zero value. 
- zero := &corev1.ConfigMap{} - data, err := crclient.MergeFrom(zero).Data(object) - - if err == nil { - apply := crclient.RawPatch(crclient.Apply.Type(), data) - err = crClient.Patch(ctx, object, apply, - []crclient.PatchOption{crclient.ForceOwnership, crclient.FieldOwner(owner)}...) - } - return err -} - -// getManagedClusters returns a count of postgres clusters managed by this PGO instance -// Any errors encountered will be logged and the count result will be 0 -func getManagedClusters(ctx context.Context, crClient crclient.Client) int { - var count int - clusters := &v1beta1.PostgresClusterList{} - err := crClient.List(ctx, clusters) - if err != nil { - log := logging.FromContext(ctx) - log.V(1).Info("upgrade check issue: could not count postgres clusters", - "response", err.Error()) - } else { - count = len(clusters.Items) - } - return count -} - -// getBridgeClusters returns a count of Bridge clusters managed by this PGO instance -// Any errors encountered will be logged and the count result will be 0 -func getBridgeClusters(ctx context.Context, crClient crclient.Client) int { - var count int - clusters := &v1beta1.CrunchyBridgeClusterList{} - err := crClient.List(ctx, clusters) - if err != nil { - log := logging.FromContext(ctx) - log.V(1).Info("upgrade check issue: could not count bridge clusters", - "response", err.Error()) - } else { - count = len(clusters.Items) - } - return count -} - -// getServerVersion returns the stringified server version (i.e., the same info `kubectl version` -// returns for the server) -// Any errors encountered will be logged and will return an empty string -func getServerVersion(ctx context.Context, cfg *rest.Config) string { - log := logging.FromContext(ctx) - discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg) - if err != nil { - log.V(1).Info("upgrade check issue: could not retrieve discovery client", - "response", err.Error()) - return "" - } - versionInfo, err := discoveryClient.ServerVersion() - if err != nil { - log.V(1).Info("upgrade check issue: could not retrieve server version", - "response", err.Error()) - return "" - } - return versionInfo.String() -} - -func addHeader(req *http.Request, upgradeInfo *clientUpgradeData) (*http.Request, error) { - marshaled, err := json.Marshal(upgradeInfo) - if err == nil { - upgradeInfoString := string(marshaled) - req.Header.Add(clientHeader, upgradeInfoString) - } - return req, err -} diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go deleted file mode 100644 index c144e7629b..0000000000 --- a/internal/upgradecheck/header_test.go +++ /dev/null @@ -1,611 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
-// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "net/http" - "strings" - "testing" - - "gotest.tools/v3/assert" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/uuid" - "k8s.io/client-go/discovery" - - // Google Kubernetes Engine / Google Cloud Platform authentication provider - _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" - "k8s.io/client-go/rest" - - "github.com/crunchydata/postgres-operator/internal/controller/postgrescluster" - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/naming" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" - "github.com/crunchydata/postgres-operator/internal/testing/require" - "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" -) - -func TestGenerateHeader(t *testing.T) { - setupDeploymentID(t) - ctx := context.Background() - cfg, cc := require.Kubernetes2(t) - setupNamespace(t, cc) - - dc, err := discovery.NewDiscoveryClientForConfig(cfg) - assert.NilError(t, err) - server, err := dc.ServerVersion() - assert.NilError(t, err) - - reconciler := postgrescluster.Reconciler{Client: cc} - - t.Setenv("PGO_INSTALLER", "test") - t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin") - t.Setenv("BUILD_SOURCE", "developer") - - t.Run("error ensuring ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - ctx, calls := setupLogCapture(ctx) - - res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift, "") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - bridgeList := v1beta1.CrunchyBridgeClusterList{} - err = cc.List(ctx, &bridgeList) - assert.NilError(t, err) - assert.Equal(t, len(bridgeList.Items), res.BridgeClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) - - t.Run("error getting cluster count", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "list error", - } - ctx, calls := setupLogCapture(ctx) - - res := generateHeader(ctx, cfg, fakeClientWithOptionalError, - "1.2.3", reconciler.IsOpenShift, "") - assert.Equal(t, len(*calls), 2) - // Aggregating the logs since we cannot determine which call will be first - callsAggregate := strings.Join(*calls, " ") - assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count postgres clusters`)) - assert.Assert(t, cmp.Contains(callsAggregate, `upgrade check issue: could not count bridge clusters`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - assert.Equal(t, 0, res.PGOClustersTotal) - assert.Equal(t, 0, res.BridgeClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) 
- }) - - t.Run("error getting server version info", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - badcfg := &rest.Config{} - - res := generateHeader(ctx, badcfg, cc, - "1.2.3", reconciler.IsOpenShift, "") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, "", res.KubernetesEnv) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) - - t.Run("success", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - gate := feature.NewGate() - assert.NilError(t, gate.SetFromMap(map[string]bool{ - feature.TablespaceVolumes: true, - })) - ctx = feature.NewContext(ctx, gate) - - res := generateHeader(ctx, cfg, cc, - "1.2.3", reconciler.IsOpenShift, "") - assert.Equal(t, len(*calls), 0) - assert.Equal(t, res.IsOpenShift, reconciler.IsOpenShift) - assert.Equal(t, deploymentID, res.DeploymentID) - pgoList := v1beta1.PostgresClusterList{} - err := cc.List(ctx, &pgoList) - assert.NilError(t, err) - assert.Equal(t, len(pgoList.Items), res.PGOClustersTotal) - assert.Equal(t, "1.2.3", res.PGOVersion) - assert.Equal(t, server.String(), res.KubernetesEnv) - assert.Equal(t, "TablespaceVolumes=true", res.FeatureGatesEnabled) - assert.Equal(t, "test", res.PGOInstaller) - assert.Equal(t, "test-origin", res.PGOInstallerOrigin) - assert.Equal(t, "developer", res.BuildSource) - }) -} - -func TestEnsureID(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - setupNamespace(t, cc) - - t.Run("success, no id set in mem or configmap", func(t *testing.T) { - deploymentID = "" - oldID := deploymentID - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID != oldID) - assert.Assert(t, newID == deploymentID) - - cm := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cm) - assert.NilError(t, err) - assert.Equal(t, newID, cm.Data["deployment_id"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("success, id set in mem, configmap created", func(t *testing.T) { - oldID := setupDeploymentID(t) - - cm := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cm) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cm) - assert.NilError(t, err) - assert.Assert(t, deploymentID == cm.Data["deployment_id"]) - - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("success, id set in configmap, mem overwritten", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deployment_id": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - 
naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, newID != oldID) - assert.Assert(t, newID == deploymentID) - assert.Assert(t, deploymentID == cmRetrieved.Data["deployment_id"]) - - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("configmap failed, no namespace given", func(t *testing.T) { - cm := &corev1.ConfigMap{ - ObjectMeta: naming.UpgradeCheckConfigMap(), - Data: map[string]string{ - "deployment_id": string(uuid.NewUUID()), - }, - } - err := cc.Create(ctx, cm) - assert.NilError(t, err) - - cmRetrieved := &corev1.ConfigMap{} - err = cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.NilError(t, err) - - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - t.Setenv("PGO_NAMESPACE", "") - - newID := ensureDeploymentID(ctx, cc) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - assert.Assert(t, deploymentID != cmRetrieved.Data["deployment_id"]) - err = cc.Delete(ctx, cm) - assert.NilError(t, err) - }) - - t.Run("configmap failed with not NotFound error, using preexisting ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "get error", - } - oldID := setupDeploymentID(t) - ctx, calls := setupLogCapture(ctx) - - newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - }) - - t.Run("configmap failed to create, using preexisting ID", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - cc, "patch error", - } - oldID := setupDeploymentID(t) - - ctx, calls := setupLogCapture(ctx) - newID := ensureDeploymentID(ctx, fakeClientWithOptionalError) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`)) - assert.Assert(t, newID == oldID) - assert.Assert(t, newID == deploymentID) - }) -} - -func TestManageUpgradeCheckConfigMap(t *testing.T) { - ctx := context.Background() - cc := require.Kubernetes(t) - setupNamespace(t, cc) - - t.Run("no namespace given", func(t *testing.T) { - ctx, calls := setupLogCapture(ctx) - t.Setenv("PGO_NAMESPACE", "") - - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: namespace not set`)) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - }) - - t.Run("configmap not found, created", func(t *testing.T) { - cmRetrieved := &corev1.ConfigMap{} - err := cc.Get(ctx, naming.AsObjectKey( - naming.UpgradeCheckConfigMap()), cmRetrieved) - assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`) - - ctx, calls := setupLogCapture(ctx) - returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id") - - assert.Equal(t, len(*calls), 0) - assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id") - err 
= cc.Delete(ctx, returnedCM)
-		assert.NilError(t, err)
-	})
-
-	t.Run("configmap failed with not NotFound error", func(t *testing.T) {
-		fakeClientWithOptionalError := &fakeClientWithError{
-			cc, "get error",
-		}
-		ctx, calls := setupLogCapture(ctx)
-
-		returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError,
-			"current-id")
-		assert.Equal(t, len(*calls), 1)
-		assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: error retrieving configmap`))
-		assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id")
-	})
-
-	t.Run("no deployment id in configmap", func(t *testing.T) {
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"wrong_field": string(uuid.NewUUID()),
-			},
-		}
-		err := cc.Create(ctx, cm)
-		assert.NilError(t, err)
-
-		cmRetrieved := &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(
-			naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-
-		ctx, calls := setupLogCapture(ctx)
-		returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id")
-		assert.Equal(t, len(*calls), 0)
-		assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id")
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("mangled deployment id", func(t *testing.T) {
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				// An invalid UUID value under the correct key.
-				"deployment_id": string(uuid.NewUUID())[1:],
-			},
-		}
-		err := cc.Create(ctx, cm)
-		assert.NilError(t, err)
-
-		cmRetrieved := &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(
-			naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-
-		ctx, calls := setupLogCapture(ctx)
-		returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id")
-		assert.Equal(t, len(*calls), 0)
-		assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id")
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("good configmap with good id", func(t *testing.T) {
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"deployment_id": string(uuid.NewUUID()),
-			},
-		}
-		err := cc.Create(ctx, cm)
-		assert.NilError(t, err)
-
-		cmRetrieved := &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(
-			naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-
-		ctx, calls := setupLogCapture(ctx)
-		returnedCM := manageUpgradeCheckConfigMap(ctx, cc, "current-id")
-		assert.Equal(t, len(*calls), 0)
-		assert.Assert(t, returnedCM.Data["deployment_id"] != "current-id")
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("configmap failed to create", func(t *testing.T) {
-		fakeClientWithOptionalError := &fakeClientWithError{
-			cc, "patch error",
-		}
-
-		ctx, calls := setupLogCapture(ctx)
-		returnedCM := manageUpgradeCheckConfigMap(ctx, fakeClientWithOptionalError,
-			"current-id")
-		assert.Equal(t, len(*calls), 1)
-		assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not apply configmap`))
-		assert.Assert(t, returnedCM.Data["deployment_id"] == "current-id")
-	})
-}
-
-func TestApplyConfigMap(t *testing.T) {
-	ctx := context.Background()
-	cc := require.Kubernetes(t)
-	setupNamespace(t, cc)
-
-	t.Run("successful create", func(t *testing.T) {
-		cmRetrieved := &corev1.ConfigMap{}
-		err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`)
-
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: 
map[string]string{
-				"new_field": "new_value",
-			},
-		}
-		cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		err = applyConfigMap(ctx, cc, cm, "test")
-		assert.NilError(t, err)
-		cmRetrieved = &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-		assert.Equal(t, cm.Data["new_field"], cmRetrieved.Data["new_field"])
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("successful update", func(t *testing.T) {
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"new_field": "old_value",
-			},
-		}
-		cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		err := cc.Create(ctx, cm)
-		assert.NilError(t, err)
-		cmRetrieved := &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-
-		cm2 := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"new_field": "new_value",
-			},
-		}
-		cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		err = applyConfigMap(ctx, cc, cm2, "test")
-		assert.NilError(t, err)
-		cmRetrieved = &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-		assert.Equal(t, cm2.Data["new_field"], cmRetrieved.Data["new_field"])
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("successful nothing changed", func(t *testing.T) {
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"new_field": "new_value",
-			},
-		}
-		cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		err := cc.Create(ctx, cm)
-		assert.NilError(t, err)
-		cmRetrieved := &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-
-		cm2 := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"new_field": "new_value",
-			},
-		}
-		cm2.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		err = applyConfigMap(ctx, cc, cm2, "test")
-		assert.NilError(t, err)
-		cmRetrieved = &corev1.ConfigMap{}
-		err = cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.NilError(t, err)
-		assert.Equal(t, cm2.Data["new_field"], cmRetrieved.Data["new_field"])
-		err = cc.Delete(ctx, cm)
-		assert.NilError(t, err)
-	})
-
-	t.Run("failure", func(t *testing.T) {
-		cmRetrieved := &corev1.ConfigMap{}
-		err := cc.Get(ctx, naming.AsObjectKey(naming.UpgradeCheckConfigMap()), cmRetrieved)
-		assert.Error(t, err, `configmaps "pgo-upgrade-check" not found`)
-
-		cm := &corev1.ConfigMap{
-			ObjectMeta: naming.UpgradeCheckConfigMap(),
-			Data: map[string]string{
-				"new_field": "new_value",
-			},
-		}
-		cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
-		fakeClientWithOptionalError := &fakeClientWithError{
-			cc, "patch error",
-		}
-
-		err = applyConfigMap(ctx, fakeClientWithOptionalError, cm, "test")
-		assert.Error(t, err, "patch error")
-	})
-}
-
-func TestGetManagedClusters(t *testing.T) {
-	ctx := context.Background()
-
-	t.Run("success", func(t *testing.T) {
-		fakeClient := setupFakeClientWithPGOScheme(t, true)
-		ctx, calls := setupLogCapture(ctx)
-		count := getManagedClusters(ctx, fakeClient)
-		assert.Equal(t, len(*calls), 0)
-		assert.Assert(t, count == 2)
-	})
-
-	t.Run("list throw error", func(t 
*testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - setupFakeClientWithPGOScheme(t, true), "list error", - } - ctx, calls := setupLogCapture(ctx) - count := getManagedClusters(ctx, fakeClientWithOptionalError) - assert.Assert(t, len(*calls) > 0) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count postgres clusters`)) - assert.Assert(t, count == 0) - }) -} - -func TestGetBridgeClusters(t *testing.T) { - ctx := context.Background() - - t.Run("success", func(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, true) - ctx, calls := setupLogCapture(ctx) - count := getBridgeClusters(ctx, fakeClient) - assert.Equal(t, len(*calls), 0) - assert.Assert(t, count == 2) - }) - - t.Run("list throw error", func(t *testing.T) { - fakeClientWithOptionalError := &fakeClientWithError{ - setupFakeClientWithPGOScheme(t, true), "list error", - } - ctx, calls := setupLogCapture(ctx) - count := getBridgeClusters(ctx, fakeClientWithOptionalError) - assert.Assert(t, len(*calls) > 0) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not count bridge clusters`)) - assert.Assert(t, count == 0) - }) -} - -func TestGetServerVersion(t *testing.T) { - t.Run("success", func(t *testing.T) { - expect, server := setupVersionServer(t, true) - ctx, calls := setupLogCapture(context.Background()) - - got := getServerVersion(ctx, &rest.Config{ - Host: server.URL, - }) - assert.Equal(t, len(*calls), 0) - assert.Equal(t, got, expect.String()) - }) - - t.Run("failure", func(t *testing.T) { - _, server := setupVersionServer(t, false) - ctx, calls := setupLogCapture(context.Background()) - - got := getServerVersion(ctx, &rest.Config{ - Host: server.URL, - }) - assert.Equal(t, len(*calls), 1) - assert.Assert(t, cmp.Contains((*calls)[0], `upgrade check issue: could not retrieve server version`)) - assert.Equal(t, got, "") - }) -} - -func TestAddHeader(t *testing.T) { - t.Run("successful", func(t *testing.T) { - req := &http.Request{ - Header: http.Header{}, - } - versionString := "1.2.3" - upgradeInfo := &clientUpgradeData{ - PGOVersion: versionString, - } - - result, err := addHeader(req, upgradeInfo) - assert.NilError(t, err) - header := result.Header[clientHeader] - - passedThroughData := &clientUpgradeData{} - err = json.Unmarshal([]byte(header[0]), passedThroughData) - assert.NilError(t, err) - - assert.Equal(t, passedThroughData.PGOVersion, "1.2.3") - // Failure to list clusters results in 0 returned - assert.Equal(t, passedThroughData.PGOClustersTotal, 0) - }) -} diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go deleted file mode 100644 index 63184184db..0000000000 --- a/internal/upgradecheck/helpers_test.go +++ /dev/null @@ -1,179 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. 
-//
-// SPDX-License-Identifier: Apache-2.0
-
-package upgradecheck
-
-import (
-	"context"
-	"encoding/json"
-	"fmt"
-	"net/http"
-	"net/http/httptest"
-	"testing"
-
-	"github.com/go-logr/logr/funcr"
-	"gotest.tools/v3/assert"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/uuid"
-	"k8s.io/apimachinery/pkg/version"
-	crclient "sigs.k8s.io/controller-runtime/pkg/client"
-	"sigs.k8s.io/controller-runtime/pkg/client/fake"
-
-	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
-	"github.com/crunchydata/postgres-operator/internal/logging"
-	"github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1"
-)
-
-// fakeClientWithError wraps a controller-runtime client together with an
-// errorType that forces the named call to fail
-type fakeClientWithError struct {
-	crclient.Client
-	errorType string
-}
-
-// Get returns the result of client.Get OR an error (`get error`) if the fakeClientWithError is set to error that way
-func (f *fakeClientWithError) Get(ctx context.Context, key types.NamespacedName, obj crclient.Object, opts ...crclient.GetOption) error {
-	switch f.errorType {
-	case "get error":
-		return fmt.Errorf("get error")
-	default:
-		return f.Client.Get(ctx, key, obj, opts...)
-	}
-}
-
-// Patch returns the result of client.Patch OR an error (`patch error`) if the fakeClientWithError is set to error that way
-// TODO: PatchType is not supported currently by fake
-// - https://github.com/kubernetes/client-go/issues/970
-// Once that gets fixed, we can test without envtest
-func (f *fakeClientWithError) Patch(ctx context.Context, obj crclient.Object,
-	patch crclient.Patch, opts ...crclient.PatchOption) error {
-	switch {
-	case f.errorType == "patch error":
-		return fmt.Errorf("patch error")
-	default:
-		return f.Client.Patch(ctx, obj, patch, opts...)
-	}
-}
-
-// List returns the result of client.List OR an error (`list error`) if the fakeClientWithError is set to error that way
-func (f *fakeClientWithError) List(ctx context.Context, objList crclient.ObjectList,
-	opts ...crclient.ListOption) error {
-	switch f.errorType {
-	case "list error":
-		return fmt.Errorf("list error")
-	default:
-		return f.Client.List(ctx, objList, opts...)
-	}
-}
-
-// setupDeploymentID generates a new UUID, stores it in the package-level
-// deploymentID, and returns it
-func setupDeploymentID(t *testing.T) string {
-	t.Helper()
-	deploymentID = string(uuid.NewUUID())
-	return deploymentID
-}
-
-// setupFakeClientWithPGOScheme returns a fake client with the PGO scheme added;
-// if `includeCluster` is true, also adds some empty PostgresCluster and CrunchyBridgeCluster
-// items to the client
-func setupFakeClientWithPGOScheme(t *testing.T, includeCluster bool) crclient.Client {
-	t.Helper()
-	if includeCluster {
-		pc := &v1beta1.PostgresClusterList{
-			Items: []v1beta1.PostgresCluster{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "hippo",
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "elephant",
-					},
-				},
-			},
-		}
-
-		bcl := &v1beta1.CrunchyBridgeClusterList{
-			Items: []v1beta1.CrunchyBridgeCluster{
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "hippo",
-					},
-				},
-				{
-					ObjectMeta: metav1.ObjectMeta{
-						Name: "elephant",
-					},
-				},
-			},
-		}
-
-		return fake.NewClientBuilder().
-			WithScheme(runtime.Scheme).
-			WithLists(pc, bcl).
- Build() - } - return fake.NewClientBuilder().WithScheme(runtime.Scheme).Build() -} - -// setupVersionServer sets up and tears down a server and version info for testing -func setupVersionServer(t *testing.T, works bool) (version.Info, *httptest.Server) { - t.Helper() - expect := version.Info{ - Major: "1", - Minor: "22", - GitCommit: "v1.22.2", - } - server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, - req *http.Request) { - if works { - output, _ := json.Marshal(expect) - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(http.StatusOK) - // We don't need to check the error output from this - _, _ = w.Write(output) - } else { - w.WriteHeader(http.StatusBadRequest) - } - })) - t.Cleanup(server.Close) - - return expect, server -} - -// setupLogCapture captures the logs and keeps count of the logs captured -func setupLogCapture(ctx context.Context) (context.Context, *[]string) { - calls := []string{} - testlog := funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - }) - return logging.NewContext(ctx, testlog), &calls -} - -// setupNamespace creates a namespace that will be deleted by t.Cleanup. -// For upgradechecking, this namespace is set to `postgres-operator`, -// which sometimes is created by other parts of the testing apparatus, -// cf., the createnamespace call in `make check-envtest-existing`. -// When creation fails, it calls t.Fatal. The caller may delete the namespace -// at any time. -func setupNamespace(t testing.TB, cc crclient.Client) { - t.Helper() - ns := &corev1.Namespace{} - ns.Name = "postgres-operator" - ns.Labels = map[string]string{"postgres-operator-test": t.Name()} - - ctx := context.Background() - exists := &corev1.Namespace{} - assert.NilError(t, crclient.IgnoreNotFound( - cc.Get(ctx, crclient.ObjectKeyFromObject(ns), exists))) - if exists.Name != "" { - return - } - assert.NilError(t, cc.Create(ctx, ns)) - t.Cleanup(func() { assert.Check(t, crclient.IgnoreNotFound(cc.Delete(ctx, ns))) }) -} diff --git a/internal/upgradecheck/http.go b/internal/upgradecheck/http.go deleted file mode 100644 index 71a3c465c0..0000000000 --- a/internal/upgradecheck/http.go +++ /dev/null @@ -1,201 +0,0 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "fmt" - "io" - "net/http" - "time" - - "github.com/golang-jwt/jwt/v5" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - crclient "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/logging" -) - -var ( - client HTTPClient - - // With these Backoff settings, wait.ExponentialBackoff will - // * use one second as the base time; - // * increase delays between calls by a power of 2 (1, 2, 4, etc.); - // * and retry four times. - // Note that there is no indeterminacy here since there is no Jitter set). - // With these parameters, the calls will occur at 0, 1, 3, and 7 seconds - // (i.e., at 1, 2, and 4 second delays for the retries). 
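-	// In formula terms (given the settings below): delay(n) = Duration * Factor^(n-1),
-	// i.e. 1s, 2s, and 4s between the four attempts.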
- backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: float64(2), - Steps: 4, - } -) - -const ( - // upgradeCheckURL can be set using the CHECK_FOR_UPGRADES_URL env var - upgradeCheckURL = "https://operator-maestro.crunchydata.com/pgo-versions" -) - -type HTTPClient interface { - Do(req *http.Request) (*http.Response, error) -} - -// Creating an interface for cache with WaitForCacheSync to allow easier mocking -type CacheWithWait interface { - WaitForCacheSync(ctx context.Context) bool -} - -func init() { - // Since we create this client once during startup, - // we want each connection to be fresh, hence the non-default transport - // with DisableKeepAlives set to true - // See https://github.com/golang/go/issues/43905 and https://github.com/golang/go/issues/23427 - // for discussion of problems with long-lived connections - client = &http.Client{ - Timeout: 5 * time.Second, - Transport: &http.Transport{ - DisableKeepAlives: true, - }, - } -} - -func checkForUpgrades(ctx context.Context, url, versionString string, backoff wait.Backoff, - crclient crclient.Client, cfg *rest.Config, - isOpenShift bool, registrationToken string) (message string, header string, err error) { - var headerPayloadStruct *clientUpgradeData - - // Prep request - req, err := http.NewRequest("GET", url, nil) - if err == nil { - // generateHeader always returns some sort of struct, using defaults/nil values - // in case some of the checks return errors - headerPayloadStruct = generateHeader(ctx, cfg, crclient, - versionString, isOpenShift, registrationToken) - req, err = addHeader(req, headerPayloadStruct) - } - - // wait.ExponentialBackoff will retry the func according to the backoff object until - // (a) func returns done as true or - // (b) the backoff settings are exhausted, - // i.e., the process hits the cap for time or the number of steps - // The anonymous function here sets certain preexisting variables (bodyBytes, err, status) - // which are then used by the surrounding `checkForUpgrades` function as part of the return - var bodyBytes []byte - var status int - - if err == nil { - _ = wait.ExponentialBackoff( - backoff, - func() (done bool, backoffErr error) { - var res *http.Response - res, err = client.Do(req) - - if err == nil { - defer res.Body.Close() - status = res.StatusCode - - // This is a very basic check, ignoring nuances around - // certain StatusCodes that should either prevent or impact retries - if status == http.StatusOK { - bodyBytes, err = io.ReadAll(res.Body) - return true, nil - } - } - - // Return false, nil to continue checking - return false, nil - }) - } - - // We received responses, but none of them were 200 OK. - if err == nil && status != http.StatusOK { - err = fmt.Errorf("received StatusCode %d", status) - } - - // TODO: Parse response and log info for user on potential upgrades - return string(bodyBytes), req.Header.Get(clientHeader), err -} - -type CheckForUpgradesScheduler struct { - Client crclient.Client - Config *rest.Config - - OpenShift bool - Refresh time.Duration - RegistrationToken string - URL, Version string -} - -// ManagedScheduler creates a [CheckForUpgradesScheduler] and adds it to m. -// NOTE(registration): This takes a token/nil parameter when the operator is started. -// Currently the operator restarts when the token is updated, -// so this token is always current; but if that restart behavior is changed, -// we will want the upgrade mechanism to instantiate its own registration runner -// or otherwise get the most recent token. 
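-//
-// A hypothetical caller (names assumed, not part of this file) would wire this
-// up once while building the controller manager:
-//
-//	mgr, err := manager.New(cfg, manager.Options{})
-//	if err == nil {
-//		err = ManagedScheduler(mgr, isOpenShift, "", versionString, registrationToken)
-//	}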
-func ManagedScheduler(m manager.Manager, openshift bool, - url, version string, registrationToken *jwt.Token) error { - if url == "" { - url = upgradeCheckURL - } - - var token string - if registrationToken != nil { - token = registrationToken.Raw - } - - return m.Add(&CheckForUpgradesScheduler{ - Client: m.GetClient(), - Config: m.GetConfig(), - OpenShift: openshift, - Refresh: 24 * time.Hour, - RegistrationToken: token, - URL: url, - Version: version, - }) -} - -// NeedLeaderElection returns true so that s runs only on the single -// [manager.Manager] that is elected leader in the Kubernetes cluster. -func (s *CheckForUpgradesScheduler) NeedLeaderElection() bool { return true } - -// Start checks for upgrades periodically. It blocks until ctx is cancelled. -func (s *CheckForUpgradesScheduler) Start(ctx context.Context) error { - s.check(ctx) - - ticker := time.NewTicker(s.Refresh) - defer ticker.Stop() - - for { - select { - case <-ticker.C: - s.check(ctx) - case <-ctx.Done(): - return ctx.Err() - } - } -} - -func (s *CheckForUpgradesScheduler) check(ctx context.Context) { - log := logging.FromContext(ctx) - - defer func() { - if v := recover(); v != nil { - log.V(1).Info("encountered panic in upgrade check", "response", v) - } - }() - - info, header, err := checkForUpgrades(ctx, - s.URL, s.Version, backoff, s.Client, s.Config, s.OpenShift, s.RegistrationToken) - - if err != nil { - log.V(1).Info("could not complete upgrade check", "response", err.Error()) - } else { - log.Info(info, clientHeader, header) - } -} diff --git a/internal/upgradecheck/http_test.go b/internal/upgradecheck/http_test.go deleted file mode 100644 index 9535f942ea..0000000000 --- a/internal/upgradecheck/http_test.go +++ /dev/null @@ -1,236 +0,0 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. -// -// SPDX-License-Identifier: Apache-2.0 - -package upgradecheck - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "strings" - "testing" - "time" - - "github.com/go-logr/logr/funcr" - "gotest.tools/v3/assert" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/rest" - "sigs.k8s.io/controller-runtime/pkg/manager" - - "github.com/crunchydata/postgres-operator/internal/feature" - "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/testing/cmp" -) - -func init() { - client = &MockClient{Timeout: 1} - // set backoff to two steps, 1 second apart for testing - backoff = wait.Backoff{ - Duration: 1 * time.Second, - Factor: float64(1), - Steps: 2, - } -} - -type MockClient struct { - Timeout time.Duration -} - -var funcFoo func() (*http.Response, error) - -// Do is the mock request that will return a mock success -func (m *MockClient) Do(req *http.Request) (*http.Response, error) { - return funcFoo() -} - -func TestCheckForUpgrades(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, true) - cfg := &rest.Config{} - - ctx := logging.NewContext(context.Background(), logging.Discard()) - gate := feature.NewGate() - assert.NilError(t, gate.SetFromMap(map[string]bool{ - feature.TablespaceVolumes: true, - })) - ctx = feature.NewContext(ctx, gate) - - // Pass *testing.T to allows the correct messages from the assert package - // in the event of certain failures. 
- checkData := func(t *testing.T, header string) { - data := clientUpgradeData{} - err := json.Unmarshal([]byte(header), &data) - assert.NilError(t, err) - assert.Assert(t, data.DeploymentID != "") - assert.Equal(t, data.PGOVersion, "4.7.3") - assert.Equal(t, data.RegistrationToken, "speakFriend") - assert.Equal(t, data.BridgeClustersTotal, 2) - assert.Equal(t, data.PGOClustersTotal, 2) - assert.Equal(t, data.FeatureGatesEnabled, "TablespaceVolumes=true") - } - - t.Run("success", func(t *testing.T) { - // A successful call - funcFoo = func() (*http.Response, error) { - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") - assert.NilError(t, err) - assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) - checkData(t, header) - }) - - t.Run("total failure, err sending", func(t *testing.T) { - var counter int - // A call returning errors - funcFoo = func() (*http.Response, error) { - counter++ - return &http.Response{}, errors.New("whoops") - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") - // Two failed calls because of env var - assert.Equal(t, counter, 2) - assert.Equal(t, res, "") - assert.Equal(t, err.Error(), `whoops`) - checkData(t, header) - }) - - t.Run("total failure, bad StatusCode", func(t *testing.T) { - var counter int - // A call returning bad StatusCode - funcFoo = func() (*http.Response, error) { - counter++ - return &http.Response{ - Body: io.NopCloser(strings.NewReader("")), - StatusCode: http.StatusBadRequest, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") - assert.Equal(t, res, "") - // Two failed calls because of env var - assert.Equal(t, counter, 2) - assert.Equal(t, err.Error(), `received StatusCode 400`) - checkData(t, header) - }) - - t.Run("one failure, then success", func(t *testing.T) { - var counter int - // A call returning bad StatusCode the first time - // and a successful response the second time - funcFoo = func() (*http.Response, error) { - if counter < 1 { - counter++ - return &http.Response{ - Body: io.NopCloser(strings.NewReader("")), - StatusCode: http.StatusBadRequest, - }, nil - } - counter++ - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - res, header, err := checkForUpgrades(ctx, "", "4.7.3", backoff, - fakeClient, cfg, false, "speakFriend") - assert.Equal(t, counter, 2) - assert.NilError(t, err) - assert.Equal(t, res, `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}`) - checkData(t, header) - }) -} - -// TODO(benjaminjb): Replace `fake` with envtest -func TestCheckForUpgradesScheduler(t *testing.T) { - fakeClient := setupFakeClientWithPGOScheme(t, false) - _, server := setupVersionServer(t, true) - defer server.Close() - cfg := &rest.Config{Host: server.URL} - - t.Run("panic from checkForUpgrades doesn't bubble up", func(t *testing.T) { - ctx := context.Background() - - // capture logs - var calls []string - ctx = 
logging.NewContext(ctx, funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - })) - - // A panicking call - funcFoo = func() (*http.Response, error) { - panic(fmt.Errorf("oh no!")) - } - - s := CheckForUpgradesScheduler{ - Client: fakeClient, - Config: cfg, - } - s.check(ctx) - - assert.Equal(t, len(calls), 2) - assert.Assert(t, cmp.Contains(calls[1], `encountered panic in upgrade check`)) - }) - - t.Run("successful log each loop, ticker works", func(t *testing.T) { - ctx := context.Background() - - // capture logs - var calls []string - ctx = logging.NewContext(ctx, funcr.NewJSON(func(object string) { - calls = append(calls, object) - }, funcr.Options{ - Verbosity: 1, - })) - - // A successful call - funcFoo = func() (*http.Response, error) { - json := `{"pgo_versions":[{"tag":"v5.0.4"},{"tag":"v5.0.3"},{"tag":"v5.0.2"},{"tag":"v5.0.1"},{"tag":"v5.0.0"}]}` - return &http.Response{ - Body: io.NopCloser(strings.NewReader(json)), - StatusCode: http.StatusOK, - }, nil - } - - // Set loop time to 1s and sleep for 2s before sending the done signal - ctx, cancel := context.WithTimeout(ctx, 2*time.Second) - defer cancel() - s := CheckForUpgradesScheduler{ - Client: fakeClient, - Config: cfg, - Refresh: 1 * time.Second, - } - assert.ErrorIs(t, context.DeadlineExceeded, s.Start(ctx)) - - // Sleeping leads to some non-deterministic results, but we expect at least 2 executions - // plus one log for the failure to apply the configmap - assert.Assert(t, len(calls) >= 4) - - assert.Assert(t, cmp.Contains(calls[1], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) - assert.Assert(t, cmp.Contains(calls[3], `{\"pgo_versions\":[{\"tag\":\"v5.0.4\"},{\"tag\":\"v5.0.3\"},{\"tag\":\"v5.0.2\"},{\"tag\":\"v5.0.1\"},{\"tag\":\"v5.0.0\"}]}`)) - }) -} - -func TestCheckForUpgradesSchedulerLeaderOnly(t *testing.T) { - // CheckForUpgradesScheduler should implement this interface. - var s manager.LeaderElectionRunnable = new(CheckForUpgradesScheduler) - - assert.Assert(t, s.NeedLeaderElection(), - "expected to only run on the leader") -} diff --git a/internal/util/secrets.go b/internal/util/secrets.go index 82768c9386..0d372aea3c 100644 --- a/internal/util/secrets.go +++ b/internal/util/secrets.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/internal/util/secrets_test.go b/internal/util/secrets_test.go index 5d549ca89e..ae5f7f5b05 100644 --- a/internal/util/secrets_test.go +++ b/internal/util/secrets_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -55,7 +55,7 @@ func TestGenerateAlphaNumericPassword(t *testing.T) { } previous := sets.Set[string]{} - for i := 0; i < 10; i++ { + for range 10 { password, err := GenerateAlphaNumericPassword(5) assert.NilError(t, err) @@ -80,7 +80,7 @@ func TestGenerateASCIIPassword(t *testing.T) { } previous := sets.Set[string]{} - for i := 0; i < 10; i++ { + for range 10 { password, err := GenerateASCIIPassword(5) assert.NilError(t, err) diff --git a/internal/util/util.go b/internal/util/util.go index 72634ebbc6..a008c9b3e1 100644 --- a/internal/util/util.go +++ b/internal/util/util.go @@ -1,4 +1,4 @@ -// Copyright 2017 - 2024 Crunchy Data Solutions, Inc. 
+// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -24,7 +24,7 @@ func SQLQuoteIdentifier(identifier string) string { identifier = identifier[:end] } - return `"` + strings.Replace(identifier, `"`, `""`, -1) + `"` + return `"` + strings.ReplaceAll(identifier, `"`, `""`) + `"` } // SQLQuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal @@ -45,14 +45,14 @@ func SQLQuoteLiteral(literal string) string { // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c // // substitute any single-quotes (') with two single-quotes ('') - literal = strings.Replace(literal, `'`, `''`, -1) + literal = strings.ReplaceAll(literal, `'`, `''`) // determine if the string has any backslashes (\) in it. // if it does, replace any backslashes (\) with two backslashes (\\) // then, we need to wrap the entire string with a PostgreSQL // C-style escape. Per how "PQEscapeStringInternal" handles this case, we // also add a space before the "E" if strings.Contains(literal, `\`) { - literal = strings.Replace(literal, `\`, `\\`, -1) + literal = strings.ReplaceAll(literal, `\`, `\\`) literal = ` E'` + literal + `'` } else { // otherwise, we can just wrap the literal with a pair of single quotes diff --git a/internal/util/volumes.go b/internal/util/volumes.go new file mode 100644 index 0000000000..34e2699b54 --- /dev/null +++ b/internal/util/volumes.go @@ -0,0 +1,42 @@ +// Copyright 2017 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "fmt" + + corev1 "k8s.io/api/core/v1" +) + +// AddVolumeAndMountsToPod takes a Pod spec and a PVC and adds a Volume to the Pod spec with +// the PVC as the VolumeSource and mounts the volume to all containers and init containers +// in the Pod spec. +func AddVolumeAndMountsToPod(podSpec *corev1.PodSpec, volume *corev1.PersistentVolumeClaim) { + + podSpec.Volumes = append(podSpec.Volumes, corev1.Volume{ + Name: volume.Name, + VolumeSource: corev1.VolumeSource{ + PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ + ClaimName: volume.Name, + }, + }, + }) + + for i := range podSpec.Containers { + podSpec.Containers[i].VolumeMounts = append(podSpec.Containers[i].VolumeMounts, + corev1.VolumeMount{ + Name: volume.Name, + MountPath: fmt.Sprintf("/volumes/%s", volume.Name), + }) + } + + for i := range podSpec.InitContainers { + podSpec.InitContainers[i].VolumeMounts = append(podSpec.InitContainers[i].VolumeMounts, + corev1.VolumeMount{ + Name: volume.Name, + MountPath: fmt.Sprintf("/volumes/%s", volume.Name), + }) + } +} diff --git a/internal/util/volumes_test.go b/internal/util/volumes_test.go new file mode 100644 index 0000000000..b438943e3a --- /dev/null +++ b/internal/util/volumes_test.go @@ -0,0 +1,78 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package util + +import ( + "testing" + + "github.com/google/go-cmp/cmp/cmpopts" + "gotest.tools/v3/assert" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "github.com/crunchydata/postgres-operator/internal/testing/cmp" +) + +func TestAddVolumeAndMountsToPod(t *testing.T) { + pod := &corev1.PodSpec{ + Containers: []corev1.Container{ + {Name: "database"}, + {Name: "other"}, + {Name: "pgbackrest"}, + }, + InitContainers: []corev1.Container{ + {Name: "initializer"}, + {Name: "another"}, + }, + } + + volume := &corev1.PersistentVolumeClaim{ + ObjectMeta: metav1.ObjectMeta{ + Name: "volume-name", + }, + } + + alwaysExpect := func(t testing.TB, result *corev1.PodSpec) { + // Only Containers, InitContainers, and Volumes fields have changed. + assert.DeepEqual(t, *pod, *result, cmpopts.IgnoreFields(*pod, "Containers", "InitContainers", "Volumes")) + + // Volume is mounted to all containers + assert.Assert(t, cmp.MarshalMatches(result.Containers, ` +- name: database + resources: {} + volumeMounts: + - mountPath: /volumes/volume-name + name: volume-name +- name: other + resources: {} + volumeMounts: + - mountPath: /volumes/volume-name + name: volume-name +- name: pgbackrest + resources: {} + volumeMounts: + - mountPath: /volumes/volume-name + name: volume-name + `)) + + // Volume is mounted to all init containers + assert.Assert(t, cmp.MarshalMatches(result.InitContainers, ` +- name: initializer + resources: {} + volumeMounts: + - mountPath: /volumes/volume-name + name: volume-name +- name: another + resources: {} + volumeMounts: + - mountPath: /volumes/volume-name + name: volume-name + `)) + } + + out := pod.DeepCopy() + AddVolumeAndMountsToPod(out, volume) + alwaysExpect(t, out) +} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go index 801e75f51d..91ab6b2f9a 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/crunchy_bridgecluster_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -23,7 +23,7 @@ type CrunchyBridgeClusterSpec struct { // Whether the cluster is protected. Protected clusters can't be destroyed until // their protected flag is removed - // +optional + // +kubebuilder:validation:Optional IsProtected bool `json:"isProtected,omitempty"` // The name of the cluster @@ -65,14 +65,14 @@ type CrunchyBridgeClusterSpec struct { // are retrieved from the Bridge API. An empty list creates no role secrets. // Removing a role from this list does NOT drop the role nor revoke their // access, but it will delete that role's secret from the kube cluster. + // +kubebuilder:validation:Optional // +listType=map // +listMapKey=name - // +optional Roles []*CrunchyBridgeClusterRoleSpec `json:"roles,omitempty"` // The name of the secret containing the API key and team id // +kubebuilder:validation:Required - Secret string `json:"secret,omitempty"` + Secret string `json:"secret"` // The amount of storage available to the cluster in gigabytes. // The amount must be an integer, followed by Gi (gibibytes) or G (gigabytes) to match Kubernetes conventions. 
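With `secret` now marked required and its `omitempty` tag removed, the CRD schema rejects a CrunchyBridgeCluster manifest that omits it. A minimal sketch of the relevant spec fields, names and values hypothetical:

    spec:
      clusterName: hippo
      secret: crunchy-bridge-api-key  # rejected by the API server if absent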
@@ -86,9 +86,11 @@ type CrunchyBridgeClusterSpec struct { type CrunchyBridgeClusterRoleSpec struct { // Name of the role within Crunchy Bridge. // More info: https://docs.crunchybridge.com/concepts/users + // +kubebuilder:validation:Required Name string `json:"name"` // The name of the Secret that will hold the role credentials. + // +kubebuilder:validation:Required // +kubebuilder:validation:Pattern=`^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$` // +kubebuilder:validation:MaxLength=253 // +kubebuilder:validation:Type=string diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go index 15773a1815..a8ddca9804 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/groupversion_info.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go index 2f01399372..2d61399e55 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/patroni_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go index 06c7321bc4..728a96fab6 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgadmin_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go index 2f528a361a..7bf876c791 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbackrest_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -49,15 +49,15 @@ type PGBackRestJobStatus struct { type PGBackRestScheduledBackupStatus struct { // The name of the associated pgBackRest scheduled backup CronJob - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional CronJobName string `json:"cronJobName,omitempty"` // The name of the associated pgBackRest repository - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional RepoName string `json:"repo,omitempty"` // The pgBackRest backup type for this Job - // +kubebuilder:validation:Required + // +kubebuilder:validation:Optional Type string `json:"type,omitempty"` // Represents the time the manual backup Job was acknowledged by the Job controller. 
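Status fields like these are written by the controller rather than by users, so they are optional in the schema and, with `omitempty`, absent from serialized output until set. A minimal sketch, values hypothetical:

    // Empty until the scheduled backup is reconciled; serializes as {}.
    status := v1beta1.PGBackRestScheduledBackupStatus{}

    // Populated by the controller afterward.
    status.CronJobName = "hippo-repo1-full"
    status.RepoName = "repo1"
    status.Type = "full"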
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go index e940a9300d..61ad815a4f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgbouncer_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go index f2cd78335a..e0ea440c4d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgmonitor_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go index fd32862d2d..409cd46536 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/pgupgrade_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go index b7baa72942..86573cb98a 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgres_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go index 83396902d0..099418b494 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index d43197ce11..6b7927a02d 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -178,6 +178,7 @@ type DataSource struct { // The PGBackRest field is incompatible with the PostgresCluster field: only one // data source can be used for pre-populating a new PostgreSQL cluster // +optional + // +kubebuilder:validation:XValidation:rule="!has(self.repo.volume)", message="Only S3, GCS or Azure repos can be used as a pgBackRest data source." 
PGBackRest *PGBackRestDataSource `json:"pgbackrest,omitempty"` // Defines a pgBackRest data source that can be used to pre-populate the PostgreSQL data @@ -338,12 +339,6 @@ type PostgresClusterStatus struct { // +optional PGBackRest *PGBackRestStatus `json:"pgbackrest,omitempty"` - // +optional - RegistrationRequired *RegistrationRequirementStatus `json:"registrationRequired,omitempty"` - - // +optional - TokenRequired string `json:"tokenRequired,omitempty"` - // Stores the current PostgreSQL major version following a successful // major PostgreSQL upgrade. // +optional @@ -397,7 +392,6 @@ const ( PersistentVolumeResizing = "PersistentVolumeResizing" PostgresClusterProgressing = "Progressing" ProxyAvailable = "ProxyAvailable" - Registered = "Registered" ) type PostgresInstanceSetSpec struct { @@ -567,10 +561,6 @@ func (s *PostgresProxySpec) Default() { } } -type RegistrationRequirementStatus struct { - PGOVersion string `json:"pgoVersion,omitempty"` -} - type PostgresProxyStatus struct { PGBouncer PGBouncerPodStatus `json:"pgBouncer,omitempty"` } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go index 1dc4e3627e..bcfadd68ee 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types.go @@ -1,4 +1,4 @@ -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -18,13 +18,8 @@ import ( type SchemalessObject map[string]any // DeepCopy creates a new SchemalessObject by copying the receiver. -func (in *SchemalessObject) DeepCopy() *SchemalessObject { - if in == nil { - return nil - } - out := new(SchemalessObject) - *out = runtime.DeepCopyJSON(*in) - return out +func (in SchemalessObject) DeepCopy() SchemalessObject { + return runtime.DeepCopyJSON(in) } type ServiceSpec struct { diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go index 96cd4da073..781f9d8c2c 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/shared_types_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2022 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -15,11 +15,8 @@ import ( func TestSchemalessObjectDeepCopy(t *testing.T) { t.Parallel() - var n *SchemalessObject - assert.DeepEqual(t, n, n.DeepCopy()) - var z SchemalessObject - assert.DeepEqual(t, z, *z.DeepCopy()) + assert.DeepEqual(t, z, z.DeepCopy()) var one SchemalessObject assert.NilError(t, yaml.Unmarshal( @@ -27,31 +24,31 @@ func TestSchemalessObjectDeepCopy(t *testing.T) { )) // reflect and go-cmp agree the original and copy are equivalent. - same := *one.DeepCopy() + same := one.DeepCopy() assert.DeepEqual(t, one, same) assert.Assert(t, reflect.DeepEqual(one, same)) // Changes to the copy do not affect the original. 
{ - change := *one.DeepCopy() + change := one.DeepCopy() change["str"] = "banana" assert.Assert(t, reflect.DeepEqual(one, same)) assert.Assert(t, !reflect.DeepEqual(one, change)) } { - change := *one.DeepCopy() + change := one.DeepCopy() change["num"] = 99 assert.Assert(t, reflect.DeepEqual(one, same)) assert.Assert(t, !reflect.DeepEqual(one, change)) } { - change := *one.DeepCopy() + change := one.DeepCopy() change["arr"].([]any)[0] = "rock" assert.Assert(t, reflect.DeepEqual(one, same)) assert.Assert(t, !reflect.DeepEqual(one, change)) } { - change := *one.DeepCopy() + change := one.DeepCopy() change["arr"] = append(change["arr"].([]any), "more") assert.Assert(t, reflect.DeepEqual(one, same)) assert.Assert(t, !reflect.DeepEqual(one, change)) diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go index 4fbc90a3b9..6b0f44895e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/standalone_pgadmin_types.go @@ -1,4 +1,4 @@ -// Copyright 2023 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2023 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -175,6 +175,10 @@ type PGAdminStatus struct { // +optional MajorVersion int `json:"majorVersion,omitempty"` + // MinorVersion represents the minor version of the running pgAdmin. + // +optional + MinorVersion string `json:"minorVersion,omitempty"` + // observedGeneration represents the .metadata.generation on which the status was based. // +optional // +kubebuilder:validation:Minimum=0 diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index fa32069d0f..5416b3aa7e 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -1,6 +1,6 @@ //go:build !ignore_autogenerated -// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. // // SPDX-License-Identifier: Apache-2.0 @@ -18,9 +18,9 @@ import ( // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *APIResponses) DeepCopyInto(out *APIResponses) { *out = *in - in.Cluster.DeepCopyInto(&out.Cluster) - in.Status.DeepCopyInto(&out.Status) - in.Upgrade.DeepCopyInto(&out.Upgrade) + out.Cluster = in.Cluster.DeepCopy() + out.Status = in.Status.DeepCopy() + out.Upgrade = in.Upgrade.DeepCopy() } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResponses. @@ -517,7 +517,7 @@ func (in *PGAdminConfiguration) DeepCopyInto(out *PGAdminConfiguration) { *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - in.Settings.DeepCopyInto(&out.Settings) + out.Settings = in.Settings.DeepCopy() } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PGAdminConfiguration. @@ -1450,7 +1450,7 @@ func (in *PGUpgradeStatus) DeepCopy() *PGUpgradeStatus { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PatroniSpec) DeepCopyInto(out *PatroniSpec) { *out = *in - in.DynamicConfiguration.DeepCopyInto(&out.DynamicConfiguration) + out.DynamicConfiguration = in.DynamicConfiguration.DeepCopy() if in.LeaderLeaseDurationSeconds != nil { in, out := &in.LeaderLeaseDurationSeconds, &out.LeaderLeaseDurationSeconds *out = new(int32) @@ -1789,11 +1789,6 @@ func (in *PostgresClusterStatus) DeepCopyInto(out *PostgresClusterStatus) { *out = new(PGBackRestStatus) (*in).DeepCopyInto(*out) } - if in.RegistrationRequired != nil { - in, out := &in.RegistrationRequired, &out.RegistrationRequired - *out = new(RegistrationRequirementStatus) - **out = **in - } out.Proxy = in.Proxy if in.UserInterface != nil { in, out := &in.UserInterface, &out.UserInterface @@ -2039,21 +2034,6 @@ func (in *PostgresUserSpec) DeepCopy() *PostgresUserSpec { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RegistrationRequirementStatus) DeepCopyInto(out *RegistrationRequirementStatus) { - *out = *in -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RegistrationRequirementStatus. -func (in *RegistrationRequirementStatus) DeepCopy() *RegistrationRequirementStatus { - if in == nil { - return nil - } - out := new(RegistrationRequirementStatus) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RepoAzure) DeepCopyInto(out *RepoAzure) { *out = *in @@ -2150,8 +2130,7 @@ func (in *RepoStatus) DeepCopy() *RepoStatus { func (in SchemalessObject) DeepCopyInto(out *SchemalessObject) { { in := &in - clone := in.DeepCopy() - *out = *clone + *out = in.DeepCopy() } } @@ -2241,13 +2220,13 @@ func (in *StandalonePGAdminConfiguration) DeepCopyInto(out *StandalonePGAdminCon *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - in.Gunicorn.DeepCopyInto(&out.Gunicorn) + out.Gunicorn = in.Gunicorn.DeepCopy() if in.LDAPBindPassword != nil { in, out := &in.LDAPBindPassword, &out.LDAPBindPassword *out = new(corev1.SecretKeySelector) (*in).DeepCopyInto(*out) } - in.Settings.DeepCopyInto(&out.Settings) + out.Settings = in.Settings.DeepCopy() } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StandalonePGAdminConfiguration. diff --git a/testing/kuttl/README.md b/testing/kuttl/README.md index 555ce9a26d..a67ff35808 100644 --- a/testing/kuttl/README.md +++ b/testing/kuttl/README.md @@ -44,20 +44,6 @@ There are two ways to run a single test in isolation: - using an env var with the make target: `KUTTL_TEST='kuttl test --test ' make check-kuttl` - using `kubectl kuttl --test` flag: `kubectl kuttl test testing/kuttl/e2e-generated --test ` -### Writing additional tests - -To make it easier to read tests, we want to put our `assert.yaml`/`errors.yaml` files after the -files that create/update the objects for a step. To achieve this, infix an extra `-` between the -step number and the object/step name. 
-
-For example, if the `00` test step wants to create a cluster and then assert that the cluster is ready,
-the files would be named
-
-```yaml
-00--cluster.yaml # note the extra `-` to ensure that it sorts above the following file
-00-assert.yaml
-```
-
 ### Generating tests
 
 KUTTL is good at setting up K8s objects for testing, but does not have a native way to dynamically
@@ -74,19 +60,10 @@ calling the `make generate-kuttl` target:
 KUTTL_PG_VERSION=13 KUTTL_POSTGIS_VERSION=3.0 make generate-kuttl
 ```
 
-This will loop through the files under the `e2e` and `e2e-other` directories and create matching
-files under the `e2e-generated` and `e2e-generated-other` directories that can be checked for
+This will loop through the files under the testing directories and create matching
+files under the corresponding `-generated` directories that can be checked for
 correctness before running the tests.
 
-Please note, `make check-kuttl` does not run the `e2e-other` tests. To run the `postgis-cluster`
-test, you can use:
-
-```
-kubectl kuttl test testing/kuttl/e2e-generated-other/ --timeout=180 --test postgis-cluster`
-```
-
-To run the `gssapi` test, please see testing/kuttl/e2e-other/gssapi/README.md.
-
 To prevent errors, we want to set defaults for all the environment variables used in the source
 YAML files; so if you add a new test with a new variable, please update the Makefile with a
 reasonable/preferred default.
diff --git a/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml
deleted file mode 100644
index b4372b75e7..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/00-assert.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-# Ensure that the default StorageClass supports VolumeExpansion
-apiVersion: storage.k8s.io/v1
-kind: StorageClass
-metadata:
-  annotations:
-    storageclass.kubernetes.io/is-default-class: "true"
-allowVolumeExpansion: true
diff --git a/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml b/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml
deleted file mode 100644
index fc947a538f..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/01-create.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-apply:
-- files/01-create-cluster.yaml
-assert:
-- files/01-cluster-and-pvc-created.yaml
diff --git a/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml
deleted file mode 100644
index 261c274a51..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/02-add-data.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-apply:
-- files/02-create-data.yaml
-assert:
-- files/02-create-data-completed.yaml
diff --git a/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml
deleted file mode 100644
index ad31b61401..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/03-assert.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Check that annotation is set
-apiVersion: v1
-kind: Pod
-metadata:
-  labels:
-    postgres-operator.crunchydata.com/cluster: auto-grow-volume
-    postgres-operator.crunchydata.com/data: postgres
-    postgres-operator.crunchydata.com/instance-set: instance1
-    postgres-operator.crunchydata.com/patroni: auto-grow-volume-ha
-  annotations:
-    suggested-pgdata-pvc-size: 1461Mi
diff --git a/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml b/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml
deleted file mode 100644
index d486f9de18..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/04-assert.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-# We know that the PVC sizes have changed so now we can check that they have been
-# updated to have the expected size
----
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  labels:
-    postgres-operator.crunchydata.com/cluster: auto-grow-volume
-    postgres-operator.crunchydata.com/instance-set: instance1
-spec:
-  resources:
-    requests:
-      storage: 1461Mi
-status:
-  accessModes:
-  - ReadWriteOnce
-  capacity:
-    storage: 2Gi
-  phase: Bound
diff --git a/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml b/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml
deleted file mode 100644
index 475177d242..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/05-check-event.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-commands:
-  # Verify expected event has occurred
-  - script: |
-      EVENT=$(
-        kubectl get events --namespace="${NAMESPACE}" \
-          --field-selector reason="VolumeAutoGrow" --output=jsonpath={.items..message}
-      )
-
-      if [[ "${EVENT}" != "pgData volume expansion to 1461Mi requested for auto-grow-volume/instance1." ]]; then exit 1; fi
diff --git a/testing/kuttl/e2e-other/autogrow-volume/README.md b/testing/kuttl/e2e-other/autogrow-volume/README.md
deleted file mode 100644
index 674bc69b40..0000000000
--- a/testing/kuttl/e2e-other/autogrow-volume/README.md
+++ /dev/null
@@ -1,9 +0,0 @@
-### AutoGrow Volume
-
-* 00: Assert the storage class allows volume expansion
-* 01: Create and verify PostgresCluster and PVC
-* 02: Add data to trigger growth and verify Job completes
-* 03: Verify annotation on the instance Pod
-* 04: Verify the PVC request has been set and the PVC has grown
-* 05: Verify the expansion request Event has been created
-  Note: This Event should be created between steps 03 and 04 but is checked at the end for timing purposes.
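The retired 05 step above reduces to a single event query. A minimal standalone sketch of the same check, assuming a `NAMESPACE` variable and the `auto-grow-volume` cluster name used by this test:

```bash
#!/usr/bin/env bash
# Look up the VolumeAutoGrow event and compare it to the message the operator
# is expected to emit; the values are copied from the retired test above.
EVENT=$(
  kubectl get events --namespace="${NAMESPACE}" \
    --field-selector reason="VolumeAutoGrow" --output='jsonpath={.items..message}'
)

[[ "${EVENT}" == "pgData volume expansion to 1461Mi requested for auto-grow-volume/instance1." ]] || {
  echo >&2 "unexpected event: ${EVENT}"
  exit 1
}
```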
diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml deleted file mode 100644 index 17804b8205..0000000000 --- a/testing/kuttl/e2e-other/autogrow-volume/files/01-cluster-and-pvc-created.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: auto-grow-volume -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: auto-grow-volume - postgres-operator.crunchydata.com/instance-set: instance1 -spec: - resources: - requests: - storage: 1Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml deleted file mode 100644 index 01eaf7a684..0000000000 --- a/testing/kuttl/e2e-other/autogrow-volume/files/01-create-cluster.yaml +++ /dev/null @@ -1,27 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: auto-grow-volume -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - limits: - storage: 2Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml deleted file mode 100644 index fdb42e68f5..0000000000 --- a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data-completed.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: create-data -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml b/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml deleted file mode 100644 index c42f0dec10..0000000000 --- a/testing/kuttl/e2e-other/autogrow-volume/files/02-create-data.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Create some data that should be present after resizing. -apiVersion: batch/v1 -kind: Job -metadata: - name: create-data - labels: { postgres-operator-test: kuttl } -spec: - backoffLimit: 3 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - name: PGURI - valueFrom: { secretKeyRef: { name: auto-grow-volume-pguser-auto-grow-volume, key: uri } } - - # Do not wait indefinitely, but leave enough time to create the data. 
- - { name: PGCONNECT_TIMEOUT, value: '60' } - - command: - - psql - - $(PGURI) - - --set=ON_ERROR_STOP=1 - - --command - - | # create schema for user and add enough data to get over 75% usage - CREATE SCHEMA "auto-grow-volume" AUTHORIZATION "auto-grow-volume"; - CREATE TABLE big_table AS SELECT 'data' || s AS mydata FROM generate_series(1,6000000) AS s; diff --git a/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml b/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml deleted file mode 100644 index 1ccceb7098..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/01--non-crunchy-cluster.yaml +++ /dev/null @@ -1,193 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: non-crunchy-cluster - labels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster -type: Opaque -stringData: - postgres-password: "SR6kNAFXvX" ---- -apiVersion: v1 -kind: Service -metadata: - name: non-crunchy-cluster-hl - labels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary - service.alpha.kubernetes.io/tolerate-unready-endpoints: "true" -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: tcp-postgresql - port: 5432 - targetPort: tcp-postgresql - selector: - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary ---- -apiVersion: v1 -kind: Service -metadata: - name: non-crunchy-cluster - labels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary -spec: - type: ClusterIP - sessionAffinity: None - ports: - - name: tcp-postgresql - port: 5432 - targetPort: tcp-postgresql - nodePort: null - selector: - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: non-crunchy-cluster - labels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary -spec: - replicas: 1 - serviceName: non-crunchy-cluster-hl - updateStrategy: - rollingUpdate: {} - type: RollingUpdate - selector: - matchLabels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary - template: - metadata: - name: non-crunchy-cluster - labels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary - spec: - serviceAccountName: default - affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - postgres-operator-test: kuttl - app.kubernetes.io/name: postgresql - app.kubernetes.io/instance: non-crunchy-cluster - app.kubernetes.io/component: primary - namespaces: - - "default" - topologyKey: kubernetes.io/hostname - weight: 1 - securityContext: - fsGroup: 1001 - hostNetwork: false - hostIPC: false - containers: - - name: postgresql - image: docker.io/bitnami/postgresql:${KUTTL_BITNAMI_IMAGE_TAG} - imagePullPolicy: "IfNotPresent" - securityContext: - runAsUser: 1001 - env: - - name: BITNAMI_DEBUG - value: "false" - - name: 
POSTGRESQL_PORT_NUMBER - value: "5432" - - name: POSTGRESQL_VOLUME_DIR - value: "/bitnami/postgresql" - - name: PGDATA - value: "/bitnami/postgresql/data" - - name: POSTGRES_PASSWORD - valueFrom: - secretKeyRef: - name: non-crunchy-cluster - key: postgres-password - - name: POSTGRESQL_ENABLE_LDAP - value: "no" - - name: POSTGRESQL_ENABLE_TLS - value: "no" - - name: POSTGRESQL_LOG_HOSTNAME - value: "false" - - name: POSTGRESQL_LOG_CONNECTIONS - value: "false" - - name: POSTGRESQL_LOG_DISCONNECTIONS - value: "false" - - name: POSTGRESQL_PGAUDIT_LOG_CATALOG - value: "off" - - name: POSTGRESQL_CLIENT_MIN_MESSAGES - value: "error" - - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES - value: "pgaudit" - ports: - - name: tcp-postgresql - containerPort: 5432 - livenessProbe: - failureThreshold: 6 - initialDelaySeconds: 30 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - exec: - command: - - /bin/sh - - -c - - exec pg_isready -U "postgres" -h localhost -p 5432 - readinessProbe: - failureThreshold: 6 - initialDelaySeconds: 5 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 5 - exec: - command: - - /bin/sh - - -c - - -e - - | - exec pg_isready -U "postgres" -h localhost -p 5432 - [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] - resources: - limits: {} - requests: - cpu: 250m - memory: 256Mi - volumeMounts: - - name: dshm - mountPath: /dev/shm - - name: data - mountPath: /bitnami/postgresql - volumes: - - name: dshm - emptyDir: - medium: Memory - volumeClaimTemplates: - - metadata: - name: data - spec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: "1Gi" diff --git a/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml deleted file mode 100644 index c45fe79261..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/01-assert.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: non-crunchy-cluster -status: - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 diff --git a/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml b/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml deleted file mode 100644 index a9b7ebf152..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/02--create-data.yaml +++ /dev/null @@ -1,30 +0,0 @@ ---- -# Create some data that will be preserved after migration. -apiVersion: batch/v1 -kind: Job -metadata: - name: original-data - labels: { postgres-operator-test: kuttl } -spec: - backoffLimit: 3 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - { name: PGHOST, value: "non-crunchy-cluster" } - # Do not wait indefinitely. 
- - { name: PGCONNECT_TIMEOUT, value: '5' } - - { name: PGPASSWORD, valueFrom: { secretKeyRef: { name: non-crunchy-cluster, key: postgres-password } } } - command: - - psql - - --username=postgres - - --dbname=postgres - - --set=ON_ERROR_STOP=1 - - --command - - | - CREATE TABLE IF NOT EXISTS important (data) AS VALUES ('treasure'); diff --git a/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml deleted file mode 100644 index 5115ba97c9..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/02-assert.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: original-data -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml b/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml deleted file mode 100644 index 64fa700297..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/03--alter-pv.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - set -e - VOLUME_NAME=$( - kubectl get pvc --namespace "${NAMESPACE}" \ - --output=jsonpath={.items..spec.volumeName} - ) - - ORIGINAL_POLICY=$( - kubectl get pv "${VOLUME_NAME}" \ - --output=jsonpath={.spec.persistentVolumeReclaimPolicy} - ) - - kubectl create configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ - --from-literal=ORIGINAL_POLICY="${ORIGINAL_POLICY}" \ - --from-literal=VOLUME_NAME="${VOLUME_NAME}" - - kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}' - - kubectl label pv "${VOLUME_NAME}" postgres-operator-test=kuttl app.kubernetes.io/name=postgresql app.kubernetes.io/instance=non-crunchy-cluster test-namespace="${NAMESPACE}" diff --git a/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml b/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml deleted file mode 100644 index ed38b23d9f..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/04--delete.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -delete: -- apiVersion: apps/v1 - kind: StatefulSet - name: non-crunchy-cluster -- apiVersion: v1 - kind: Service - name: non-crunchy-cluster -- apiVersion: v1 - kind: Service - name: non-crunchy-cluster-hl -- apiVersion: v1 - kind: Secret - name: non-crunchy-cluster diff --git a/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml b/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml deleted file mode 100644 index 1767e8040f..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/04-errors.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: Pod -metadata: - name: non-crunchy-cluster-0 diff --git a/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml b/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml deleted file mode 100644 index a81666ed01..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/05--cluster.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-migrate -spec: - dataSource: - volumes: - pgDataVolume: - pvcName: data-non-crunchy-cluster-0 - directory: data - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git 
a/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml b/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml deleted file mode 100644 index 1a25966abb..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/06-assert.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: cluster-migrate -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: cluster-migrate - postgres-operator.crunchydata.com/data: postgres - postgres-operator.crunchydata.com/instance-set: instance1 - postgres-operator.crunchydata.com/role: master -status: - phase: Running diff --git a/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml b/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml deleted file mode 100644 index 00eb741f80..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/07--set-collation.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - set -e - if [[ ${KUTTL_PG_VERSION} -ge 15 ]]; then - PRIMARY= - while [[ -z "${PRIMARY}" ]]; do - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=cluster-migrate, - postgres-operator.crunchydata.com/role=master' - ) - done - - # Ignore warnings about collation changes. This is DANGEROUS on real data! - # Only do this automatic step in test conditions; with real data, this may cause - # more problems as you may need to reindex. - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c database \ - -- psql -qAt --command \ - 'ALTER DATABASE postgres REFRESH COLLATION VERSION; ALTER DATABASE template1 REFRESH COLLATION VERSION;' - fi diff --git a/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml b/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml deleted file mode 100644 index c5edfb4c99..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/08--alter-pv.yaml +++ /dev/null @@ -1,16 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - set -e - SAVED_DATA=$( - kubectl get configmap persistent-volume-reclaim-policy --namespace "${NAMESPACE}" \ - --output=jsonpath="{.data..['ORIGINAL_POLICY','VOLUME_NAME']}" - ) - - IFS=' ' - read ORIGINAL_POLICY VOLUME_NAME <<< "${SAVED_DATA}" - - kubectl patch pv "${VOLUME_NAME}" -p '{"spec":{"persistentVolumeReclaimPolicy":"'${ORIGINAL_POLICY}'"}}' - diff --git a/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml b/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml deleted file mode 100644 index 6a46bd8e9a..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/09--check-data.yaml +++ /dev/null @@ -1,23 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - set -e - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=cluster-migrate, - postgres-operator.crunchydata.com/role=master' - ) - - TREASURE=$( - kubectl exec "${PRIMARY}" --namespace "${NAMESPACE}" \ - --container database \ - -- psql -U postgres -qt -c "select data from important" - ) - - if [[ "${TREASURE}" != " treasure" ]]; then - echo "Migration from 3rd-party PG pod failed, result from query: ${TREASURE}" - exit 1 - fi diff --git a/testing/kuttl/e2e-other/cluster-migrate/README.md 
b/testing/kuttl/e2e-other/cluster-migrate/README.md deleted file mode 100644 index 09026f9e8b..0000000000 --- a/testing/kuttl/e2e-other/cluster-migrate/README.md +++ /dev/null @@ -1,45 +0,0 @@ -## Cluster Migrate - -This test was developed to check that users could bypass some known problems when -migrating from a non-Crunchy PostgreSQL image to a Crunchy PostgreSQL image: - -1) it changes the ownership of the data directory (which depends on fsGroup -behavior to change group ownership which is not available in all providers); -2) it makes sure a postgresql.conf file is available, as required by Patroni. - -Important note on *environment*: -As noted above, this work relies on fsGroup, so this test will not work in the current -form in all environments. For instance, this creates a PG cluster with fsGroup set, -which will result in an error in OpenShift. - -Important note on *PV permissions*: -This test involves changing permissions on PersistentVolumes, which may not be available -in all environments to all users (since this is a cluster-wide permission). - -Important note on migrating between different builds of *Postgres 15*: -PG 15 introduced new behavior around database collation versions, which result in errors like: - -``` -WARNING: database \"postgres\" has a collation version mismatch -DETAIL: The database was created using collation version 2.31, but the operating system provides version 2.28 -``` - -This error occurred in `reconcilePostgresDatabases` and prevented PGO from finishing the reconcile -loop. For _testing purposes_, this problem is worked around in steps 06 and 07, which wait for -the PG pod to be ready and then send a command to `REFRESH COLLATION VERSION` on the `postgres` -and `template1` databases (which were the only databases where this error was observed during -testing). - -This solution is fine for testing purposes, but is not a solution that should be done in production -as an automatic step. User intervention and supervision is recommended in that case. 
- -### Steps - -* 01: Create a non-Crunchy PostgreSQL cluster and wait for it to be ready -* 02: Create data on that cluster -* 03: Alter the Reclaim policy of the PV so that it will survive deletion of the cluster -* 04: Delete the original cluster, leaving the PV -* 05: Create a PGO-managed `postgrescluster` with the remaining PV as the datasource -* 06-07: Wait for the PG pod to be ready and alter the collation (PG 15 only, see above) -* 08: Alter the PV to the original Reclaim policy -* 09: Check that the data successfully migrated diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml deleted file mode 100644 index a3236da358..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10--cluster.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-switchover-with-timestamp -spec: - postgresVersion: ${KUTTL_PG_VERSION} - patroni: - switchover: - enabled: true - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml deleted file mode 100644 index d77e27e307..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/10-assert.yaml +++ /dev/null @@ -1,36 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-switchover-with-timestamp -status: - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 ---- -# Patroni labels and readiness happen separately. -# The next step expects to find pods by their role label; wait for them here. -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp - postgres-operator.crunchydata.com/role: master ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp - postgres-operator.crunchydata.com/role: replica ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml deleted file mode 100644 index 844d5f1336..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/11-annotate.yaml +++ /dev/null @@ -1,19 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - # Label instance pods with their current role. These labels will stick around - # because switchover does not recreate any pods. 
- - script: | - kubectl label --namespace="${NAMESPACE}" pods \ - --selector='postgres-operator.crunchydata.com/role=master' \ - 'testing/role-before=master' - - script: | - kubectl label --namespace="${NAMESPACE}" pods \ - --selector='postgres-operator.crunchydata.com/role=replica' \ - 'testing/role-before=replica' - - # Annotate the cluster to trigger a switchover. - - script: | - kubectl annotate --namespace="${NAMESPACE}" postgrescluster/delete-switchover-with-timestamp \ - "postgres-operator.crunchydata.com/trigger-switchover=$(date)" diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml deleted file mode 100644 index 76f0f8dff6..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/12-assert.yaml +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Wait for switchover to finish. A former replica should now be the primary. -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp - postgres-operator.crunchydata.com/data: postgres - postgres-operator.crunchydata.com/role: master - testing/role-before: replica ---- -# The former primary should now be a replica. -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp - postgres-operator.crunchydata.com/data: postgres - postgres-operator.crunchydata.com/role: replica - testing/role-before: master ---- -# All instances should be healthy. -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-switchover-with-timestamp -status: - instances: - - name: instance1 - replicas: 2 - readyReplicas: 2 - updatedReplicas: 2 diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml deleted file mode 100644 index 45352cca2e..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/13-delete-cluster-and-check.yaml +++ /dev/null @@ -1,47 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - # Get the names of the current primary and replica -- error if either is blank - # Delete the cluster - # Get the delete event for the pods - # Verify that the replica delete event is greater than the primary delete event - - script: | - PRIMARY=$( - kubectl get pods --namespace="${NAMESPACE}" \ - --selector='postgres-operator.crunchydata.com/role=master' \ - --output=jsonpath={.items..metadata.name} - ) - - REPLICA=$( - kubectl get pods --namespace="${NAMESPACE}" \ - --selector='postgres-operator.crunchydata.com/role=replica' \ - --output=jsonpath={.items..metadata.name} - ) - - echo "DELETE: Found primary ${PRIMARY} and replica ${REPLICA} pods" - - if [ -z "$PRIMARY" ]; then exit 1; fi - if [ -z "$REPLICA" ]; then exit 1; fi - - kubectl delete postgrescluster -n "${NAMESPACE}" delete-switchover-with-timestamp - - kubectl wait "pod/${REPLICA}" --namespace "${NAMESPACE}" --for=delete --timeout=180s - - KILLING_REPLICA_TIMESTAMP=$( - kubectl get events --namespace="${NAMESPACE}" \ - --field-selector reason="Killing",involvedObject.fieldPath="spec.containers{database}",involvedObject.name="${REPLICA}" \ - --output=jsonpath={.items..firstTimestamp} - ) - - kubectl wait "pod/${PRIMARY}" --namespace "${NAMESPACE}" --for=delete --timeout=180s - - 
KILLING_PRIMARY_TIMESTAMP=$( - kubectl get events --namespace="${NAMESPACE}" \ - --field-selector reason="Killing",involvedObject.fieldPath="spec.containers{database}",involvedObject.name="${PRIMARY}" \ - --output=jsonpath={.items..firstTimestamp} - ) - - echo "DELETE: Found primary ${KILLING_PRIMARY_TIMESTAMP} and replica ${KILLING_REPLICA_TIMESTAMP} timestamps" - - if [[ "${KILLING_PRIMARY_TIMESTAMP}" < "${KILLING_REPLICA_TIMESTAMP}" ]]; then exit 1; fi diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml deleted file mode 100644 index 2a1015824b..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/14-errors.yaml +++ /dev/null @@ -1,42 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: delete-switchover-with-timestamp ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp ---- -# Patroni DCS objects are not owned by the PostgresCluster. -apiVersion: v1 -kind: Endpoints -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp ---- -apiVersion: v1 -kind: Service -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - postgres-operator.crunchydata.com/cluster: delete-switchover-with-timestamp diff --git a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md b/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md deleted file mode 100644 index bf914aa6cf..0000000000 --- a/testing/kuttl/e2e-other/delete-with-replica-and-check-timestamps/README.md +++ /dev/null @@ -1,7 +0,0 @@ -This test originally existed as the second test-case in the `delete` KUTTL test. -The test as written was prone to occasional flakes, sometimes due to missing events -(which were being used to check the timestamp of the container delete event). - -After discussion, we decided that this behavior (replica deleting before the primary) -was no longer required in v5, and the decision was made to sequester this test-case for -further testing and refinement. 
\ No newline at end of file diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml deleted file mode 100644 index bc515e3534..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/00--create-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/exporter-append-queries-configmap.yaml -- files/exporter-append-queries-cluster.yaml -assert: -- files/exporter-append-queries-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml deleted file mode 100644 index 2655841597..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/00-assert.yaml +++ /dev/null @@ -1,50 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# First, check that all containers in the instance pod are ready -# Then, list the query files mounted to the exporter and check for expected files -# Finally, check the contents of the queries to ensure queries.yml was generated correctly -- script: | - retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } - check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - - pod=$(kubectl get pods -o name -n "${NAMESPACE}" \ - -l postgres-operator.crunchydata.com/cluster=exporter-append-queries \ - -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) - [ "$pod" = "" ] && retry "Pod not found" && exit 1 - - condition_json=$(kubectl get "${pod}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") - [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 - { check_containers_ready "$condition_json"; } || { - retry "containers not ready" - exit 1 - } - - queries_files=$( - kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ - -- ls /conf - ) - - { - contains "${queries_files}" "queries.yml" && - contains "${queries_files}" "defaultQueries.yml" - } || { - echo >&2 'The /conf directory should contain queries.yml and defaultQueries.yml. Instead it has:' - echo "${queries_files}" - exit 1 - } - - master_queries_contents=$( - kubectl exec --namespace "${NAMESPACE}" "${pod}" -c exporter \ - -- cat /tmp/queries.yml - ) - - { - contains "${master_queries_contents}" "# This is a test." && - contains "${master_queries_contents}" "ccp_postgresql_version" - } || { - echo >&2 'The master queries.yml file should contain the contents of both defaultQueries.yml and the custom queries.yml file. Instead it contains:' - echo "${master_queries_contents}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md b/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md deleted file mode 100644 index a24aa444c7..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/README.md +++ /dev/null @@ -1,5 +0,0 @@ -Exporter - AppendCustomQueries Enabled - -Note: This series of tests depends on PGO being deployed with the AppendCustomQueries feature gate ON. There is a separate set of tests in e2e that tests exporter functionality without the AppendCustomQueries feature. - -When running this test, make sure that the PGO_FEATURE_GATES environment variable is set to "AppendCustomQueries=true" on the PGO Deployment. 
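The README above assumes the feature gate is already enabled. A minimal sketch of turning it on, where the deployment name `pgo` and the namespace `postgres-operator` are assumptions that depend on how PGO was installed:

```bash
# Hypothetical deployment name and namespace; adjust to the actual PGO install.
kubectl set env deployment/pgo --namespace postgres-operator \
  PGO_FEATURE_GATES="AppendCustomQueries=true"
```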
diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml deleted file mode 100644 index 459356ddfc..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster-checks.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter-append-queries -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: exporter-append-queries - postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" -status: - phase: Running ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: exporter-append-queries-exporter-queries-config ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: custom-queries-test diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml deleted file mode 100644 index c4f75771aa..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-cluster.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter-append-queries -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - monitoring: - pgmonitor: - exporter: - configuration: - - configMap: - name: custom-queries-test diff --git a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml b/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml deleted file mode 100644 index 9964d6bc1e..0000000000 --- a/testing/kuttl/e2e-other/exporter-append-custom-queries/files/exporter-append-queries-configmap.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: custom-queries-test -data: - queries.yml: "# This is a test." 
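For ad-hoc experimentation, the deleted `custom-queries-test` manifest above could also be created imperatively; the local `./queries.yml` path is an assumption:

```bash
# Create the ConfigMap that the exporter-append-queries cluster mounts,
# using a local file as the `queries.yml` key (hypothetical path).
kubectl create configmap custom-queries-test --namespace "${NAMESPACE}" \
  --from-file=queries.yml=./queries.yml
```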
diff --git a/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml b/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml deleted file mode 100644 index 2abec0814e..0000000000 --- a/testing/kuttl/e2e-other/exporter-replica/00--create-cluster.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/exporter-replica-cluster.yaml -assert: -- files/exporter-replica-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml b/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml deleted file mode 100644 index 280be2d395..0000000000 --- a/testing/kuttl/e2e-other/exporter-replica/00-assert.yaml +++ /dev/null @@ -1,45 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# First, check that all containers in the instance(s) pod are ready -# Then, grab the exporter metrics output and check that there were no scrape errors -# Finally, ensure the monitoring user exists and is configured -- script: | - retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } - check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - - replica=$(kubectl get pods -o name -n "${NAMESPACE}" \ - -l postgres-operator.crunchydata.com/cluster=exporter-replica \ - -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true \ - -l postgres-operator.crunchydata.com/role=replica) - [ "$replica" = "" ] && retry "Replica Pod not found" && exit 1 - - replica_condition_json=$(kubectl get "${replica}" -n "${NAMESPACE}" -o jsonpath="{.status.conditions}") - [ "$replica_condition_json" = "" ] && retry "Replica conditions not found" && exit 1 - { - check_containers_ready "$replica_condition_json" - } || { - retry "containers not ready" - exit 1 - } - - scrape_metrics=$(kubectl exec ${replica} -c exporter -n ${NAMESPACE} -- \ - curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") - { - contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; - } || { - retry "${scrape_metrics}" - exit 1 - } - - kubectl exec --stdin "${replica}" --namespace "${NAMESPACE}" -c database \ - -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' - DO $$ - DECLARE - result record; - BEGIN - SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; - ASSERT FOUND, 'user not found'; - END $$ - SQL diff --git a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml deleted file mode 100644 index 7c775b47b1..0000000000 --- a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster-checks.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter-replica -status: - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: exporter-replica - postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" -status: - phase: Running ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: exporter-replica-exporter-queries-config diff --git a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml b/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml deleted file mode 100644 
index 504d33bc3a..0000000000 --- a/testing/kuttl/e2e-other/exporter-replica/files/exporter-replica-cluster.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter-replica -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - replicas: 2 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - monitoring: - pgmonitor: - exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml b/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml deleted file mode 100644 index 9c9cd140ac..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/00--create-certs.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/cluster-certs.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml b/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml deleted file mode 100644 index 6b5b721d4e..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/01--create-primary.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/primary-cluster.yaml -assert: -- files/primary-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml deleted file mode 100644 index cd2d16c783..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/01-assert.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# Store the exporter pid as an annotation on the pod -- script: | - retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } - check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } - - pod=$(kubectl get pods -o name -n $NAMESPACE \ - -l postgres-operator.crunchydata.com/cluster=primary-cluster \ - -l postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) - [ "$pod" = "" ] && retry "Pod not found" && exit 1 - - condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") - [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 - { check_containers_ready "$condition_json"; } || { - retry "containers not ready" - exit 1 - } - - pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) - kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} diff --git a/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml b/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml deleted file mode 100644 index 4e613a277f..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/02--set-primary-password.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/update-primary-password.yaml -assert: -- files/update-primary-password-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml b/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml deleted file mode 100644 index fa2e653353..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/03--create-standby.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- 
files/standby-cluster.yaml -assert: -- files/standby-cluster-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml deleted file mode 100644 index 327e5562fa..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/03-assert.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# Grab the exporter pod -# Check that the postgres_exporter pid is running -# Store the exporter pid as an annotation on the pod -- script: | - retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } - check_containers_ready() { bash -ceu 'echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@"; } - - pod=$(kubectl get pods -o name -n $NAMESPACE \ - -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) - [ "$pod" = "" ] && retry "Pod not found" && exit 1 - - pid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) - kubectl annotate --overwrite -n ${NAMESPACE} ${pod} oldpid=${pid} diff --git a/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml b/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml deleted file mode 100644 index 18c98e423e..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/04--set-standby-password.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/update-standby-password.yaml -assert: -- files/update-standby-password-checks.yaml diff --git a/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml b/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml deleted file mode 100644 index 7e77784a65..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/04-assert.yaml +++ /dev/null @@ -1,38 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# Grab the exporter pod -# Check that the postgres_exporter pid is running -# Store the exporter pid as an annotation on the pod -- script: | - retry() { bash -ceu 'printf "$1\nSleeping...\n" && sleep 5' - "$@"; } - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - check_containers_ready() { bash -ceu ' echo "$1" | jq -e ".[] | select(.type==\"ContainersReady\") | .status==\"True\""' - "$@";} - - pod=$(kubectl get pods -o name -n $NAMESPACE \ - -l postgres-operator.crunchydata.com/cluster=standby-cluster,postgres-operator.crunchydata.com/crunchy-postgres-exporter=true) - [ "$pod" = "" ] && retry "Pod not found" && exit 1 - - oldPid=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.metadata.annotations.oldpid}") - newPid=$(kubectl exec ${pod} -n ${NAMESPACE} -c exporter -- cat /tmp/postgres_exporter.pid) - [ "${oldPid}" -eq "${newPid}" ] && retry "pid should have changed" && exit 1 - - password=$(kubectl exec -n ${NAMESPACE} ${pod} -c exporter -- bash -c 'cat /opt/crunchy/password') - { contains "${password}" "password"; } || { - retry "unexpected password: ${password}" - exit 1 - } - - condition_json=$(kubectl get ${pod} -n ${NAMESPACE} -o jsonpath="{.status.conditions}") - [ "$condition_json" = "" ] && retry "conditions not found" && exit 1 - { check_containers_ready "$condition_json"; } || { - retry "containers not ready" - exit 1 - } - - scrape_metrics=$(kubectl exec ${pod} -c exporter -n ${NAMESPACE} -- \ - curl --silent http://localhost:9187/metrics | grep "pg_exporter_last_scrape_error") - { contains "${scrape_metrics}" 'pg_exporter_last_scrape_error 0'; } || { - 
retry "${scrape_metrics}" - exit 1 - } diff --git a/testing/kuttl/e2e-other/exporter-standby/README.md b/testing/kuttl/e2e-other/exporter-standby/README.md deleted file mode 100644 index 34df4e5b7a..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# Exporter connection on standby cluster - -The exporter standby test will deploy two clusters, one primary and one standby. -Both clusters have monitoring enabled and are created in the same namespace to -allow for easy connections over the network. - -The `ccp_monitoring` password for both clusters are updated to match allowing -the exporter on the standby cluster to query postgres using the proper `ccp_monitoring` -password. diff --git a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml deleted file mode 100644 index c2a59244a5..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster-checks.yaml +++ /dev/null @@ -1,20 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: primary-cluster -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: primary-cluster - postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" -status: - phase: Running diff --git a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml b/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml deleted file mode 100644 index 8f51632f5b..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/primary-cluster.yaml +++ /dev/null @@ -1,22 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: primary-cluster -spec: - postgresVersion: ${KUTTL_PG_VERSION} - customTLSSecret: - name: cluster-cert - customReplicationTLSSecret: - name: replication-cert - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - monitoring: - pgmonitor: - exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml deleted file mode 100644 index 237dec721e..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster-checks.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: standby-cluster -status: - instances: - - name: instance1 - replicas: 1 - updatedReplicas: 1 - # The cluster should not become fully ready in this step, the ccp_monitoring password - # on the standby does not match the primary ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: standby-cluster - postgres-operator.crunchydata.com/crunchy-postgres-exporter: "true" -status: - phase: Running diff --git a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml b/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml deleted file mode 100644 index 33e9ec2c2c..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/standby-cluster.yaml +++ /dev/null @@ -1,25 +0,0 
@@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: standby-cluster -spec: - postgresVersion: ${KUTTL_PG_VERSION} - standby: - enabled: true - host: primary-cluster-primary - customTLSSecret: - name: cluster-cert - customReplicationTLSSecret: - name: replication-cert - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } - monitoring: - pgmonitor: - exporter: {} diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml deleted file mode 100644 index 1ef72b49c9..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password-checks.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: primary-cluster-monitoring - labels: - postgres-operator.crunchydata.com/cluster: primary-cluster - postgres-operator.crunchydata.com/role: monitoring - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - blockOwnerDeletion: true - controller: true - kind: PostgresCluster - name: primary-cluster -data: - # ensure the password is encoded to 'password' - password: cGFzc3dvcmQ= ---- -# TODO: Check that password is set as a file diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml deleted file mode 100644 index a66450b103..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/update-primary-password.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: primary-cluster-monitoring - labels: - postgres-operator.crunchydata.com/cluster: primary-cluster - postgres-operator.crunchydata.com/role: monitoring -stringData: - password: password -data: -# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml deleted file mode 100644 index 34d5357318..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password-checks.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: standby-cluster-monitoring - labels: - postgres-operator.crunchydata.com/cluster: standby-cluster - postgres-operator.crunchydata.com/role: monitoring - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - blockOwnerDeletion: true - controller: true - kind: PostgresCluster - name: standby-cluster -data: - # ensure the password is encoded to 'password' - password: cGFzc3dvcmQ= ---- -# TODO: Check that password is set as a file diff --git a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml b/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml deleted file mode 100644 index 57371fce93..0000000000 --- a/testing/kuttl/e2e-other/exporter-standby/files/update-standby-password.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: standby-cluster-monitoring - labels: - postgres-operator.crunchydata.com/cluster: standby-cluster - postgres-operator.crunchydata.com/role: monitoring 
-stringData: - password: password -data: -# Ensure data field is deleted so that password/verifier will be regenerated diff --git a/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml b/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml deleted file mode 100644 index 0e53eab2de..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/00--cluster.yaml +++ /dev/null @@ -1,30 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter -spec: - postgresVersion: 14 - image: us.gcr.io/container-suite/crunchy-postgres:ubi8-14.0-5.0.3-0 - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - monitoring: - pgmonitor: - exporter: - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres-exporter:ubi8-5.3.1-0 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml b/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml deleted file mode 100644 index c569c97454..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/00-assert.yaml +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml b/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml deleted file mode 100644 index 0e72f2a0bf..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/01--check-exporter.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - set -e - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=exporter, - postgres-operator.crunchydata.com/role=master' - ) - - # Ensure that the metrics endpoint is available from inside the exporter container - for i in {1..5}; do - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter -- curl http://localhost:9187/metrics - sleep 2 - done - - # Ensure that the monitoring user exists and is configured. 
- kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \ - -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL' - DO $$ - DECLARE - result record; - BEGIN - SELECT * INTO result FROM pg_catalog.pg_roles WHERE rolname = 'ccp_monitoring'; - ASSERT FOUND, 'user not found'; - ASSERT result.rolconfig @> '{jit=off}', format('got config: %L', result.rolconfig); - END $$ - SQL diff --git a/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml b/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml deleted file mode 100644 index cde17d80b4..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/02--update-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter -spec: - postgresVersion: 14 - image: registry.developers.crunchydata.com/crunchydata/crunchy-postgres:ubi8-14.5-1 diff --git a/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml b/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml deleted file mode 100644 index 9ad238b944..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/02-assert.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: exporter -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: exporter - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: exporter-primary diff --git a/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml b/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml deleted file mode 100644 index 8161e463fc..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/03--check-exporter.yaml +++ /dev/null @@ -1,21 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: - - script: | - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=exporter, - postgres-operator.crunchydata.com/role=master' - ) - - # Get errors from the exporter - # See the README.md for a discussion of these errors - ERR=$(kubectl logs --namespace "${NAMESPACE}" "${PRIMARY}" -c exporter | grep -e "Error running query on database") - ERR_COUNT=$(echo "$ERR" | wc -l) - - if [[ "$ERR_COUNT" -gt 2 ]]; then - echo "Errors in log from exporter: ${ERR}" - exit 1 - fi diff --git a/testing/kuttl/e2e-other/exporter-upgrade/README.md b/testing/kuttl/e2e-other/exporter-upgrade/README.md deleted file mode 100644 index fefe28a95c..0000000000 --- a/testing/kuttl/e2e-other/exporter-upgrade/README.md +++ /dev/null @@ -1,31 +0,0 @@ -The exporter-upgrade test makes sure that PGO updates an extension used for monitoring. This -avoids an error where a user might update to a new PG image that ships a newer extension while -the older extension version remains operative. - -Note: This test relies on two `crunchy-postgres` images with known, different `pgnodemx` extensions: -the image created in 00--cluster.yaml has `pgnodemx` 1.1; the image we update the cluster to in -02--update-cluster.yaml has `pgnodemx` 1.3. - -00-01 -This starts up a cluster with a purposely outdated `pgnodemx` extension. Because we want a specific -extension, the image used here is hard-coded (and so outdated it's not publicly available; a -version-check sketch follows below).
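As an aside (not part of the deleted test files): the condition this test really cares about is whether the extension version operative in the database matches the version packaged in the image. A minimal sketch of that comparison, reusing the pod selector from the deleted check steps; the `exporter` cluster name is carried over from 00--cluster.yaml:

```bash
# Illustrative only: compare the pgnodemx version in use against the version
# available in the installed image. Pod selection mirrors the deleted
# 01--check-exporter.yaml step.
PRIMARY=$(
  kubectl get pod --namespace "${NAMESPACE}" \
    --output name --selector '
      postgres-operator.crunchydata.com/cluster=exporter,
      postgres-operator.crunchydata.com/role=master'
)

kubectl exec --stdin --namespace "${NAMESPACE}" "${PRIMARY}" \
  -- psql -qb --set ON_ERROR_STOP=1 --file=- <<'SQL'
-- Version currently operative in the database.
SELECT extversion FROM pg_catalog.pg_extension WHERE extname = 'pgnodemx';
-- Default version shipped with the installed image.
SELECT default_version FROM pg_catalog.pg_available_extensions WHERE name = 'pgnodemx';
SQL
```

If the two queries disagree after the image update, the extension has not yet been updated in the database.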
- -(This image is so outdated that it doesn't finish creating a backup with the current PGO, which is -why the 00-assert.yaml only checks that the pod is ready; and why 01--check-exporter.yaml wraps the -call in a retry loop.) - -02-03 -The cluster is updated with a newer (and hardcoded) image with a newer version of `pgnodemx`. Due -to the change made in https://github.com/CrunchyData/postgres-operator/pull/3400, this should no -longer produce multiple errors. - -Note: a few errors may be logged after the `exporter` container attempts to run the `pgnodemx` -functions but before the extension is updated. So this checks that there are no more than 2 errors, -since that was the observed maximum number of printed errors during manual tests of the check. - -For instance, using these hardcoded images (with `pgnodemx` versions 1.1 and 1.3), those errors were: - -``` -Error running query on database \"localhost:5432\": ccp_nodemx_disk_activity pq: query-specified return tuple and function return type are not compatible" -Error running query on database \"localhost:5432\": ccp_nodemx_data_disk pq: query-specified return tuple and function return type are not compatible -``` diff --git a/testing/kuttl/e2e-other/gssapi/00-assert.yaml b/testing/kuttl/e2e-other/gssapi/00-assert.yaml deleted file mode 100644 index ea828be0c4..0000000000 --- a/testing/kuttl/e2e-other/gssapi/00-assert.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ConfigMap -metadata: - name: krb5 ---- -apiVersion: v1 -kind: Secret -metadata: - name: krb5-keytab diff --git a/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml b/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml deleted file mode 100644 index 6311193d55..0000000000 --- a/testing/kuttl/e2e-other/gssapi/00-krb5-keytab.yaml +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: -- command: kubectl exec -n krb5 -it krb5-kdc-0 -- /krb5-scripts/krb5.sh "${NAMESPACE}" diff --git a/testing/kuttl/e2e-other/gssapi/01-assert.yaml b/testing/kuttl/e2e-other/gssapi/01-assert.yaml deleted file mode 100644 index dbda953ead..0000000000 --- a/testing/kuttl/e2e-other/gssapi/01-assert.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: gssapi -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: gssapi-primary diff --git a/testing/kuttl/e2e-other/gssapi/01-cluster.yaml b/testing/kuttl/e2e-other/gssapi/01-cluster.yaml deleted file mode 100644 index 8acfe46c4d..0000000000 --- a/testing/kuttl/e2e-other/gssapi/01-cluster.yaml +++ /dev/null @@ -1,41 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: gssapi -spec: - config: - files: - - secret: - name: krb5-keytab - - configMap: - name: krb5 - patroni: - dynamicConfiguration: - postgresql: - pg_hba: - - host postgres postgres 0.0.0.0/0 scram-sha-256 - - host all krb5hippo@PGO.CRUNCHYDATA.COM 0.0.0.0/0 gss - parameters: - krb_server_keyfile: /etc/postgres/krb5.keytab - users: - - name: postgres - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/gssapi/02-assert.yaml 
b/testing/kuttl/e2e-other/gssapi/02-assert.yaml deleted file mode 100644 index 36f85d95d4..0000000000 --- a/testing/kuttl/e2e-other/gssapi/02-assert.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: psql-connect-gssapi -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml b/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml deleted file mode 100644 index 30f02b3b19..0000000000 --- a/testing/kuttl/e2e-other/gssapi/02-psql-connect.yaml +++ /dev/null @@ -1,47 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: psql-connect-gssapi -spec: - backoffLimit: 6 - template: - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - command: - - bash - - -c - - -- - - |- - psql -c 'create user "krb5hippo@PGO.CRUNCHYDATA.COM";' - kinit -k -t /krb5-conf/krb5.keytab krb5hippo@PGO.CRUNCHYDATA.COM - psql -U krb5hippo@PGO.CRUNCHYDATA.COM -h gssapi-primary.$(NAMESPACE).svc.cluster.local -d postgres \ - -c 'select version();' - env: - - name: NAMESPACE - valueFrom: { fieldRef: { fieldPath: metadata.namespace } } - - name: PGHOST - valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: host } } - - name: PGPORT - valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: port } } - - name: PGUSER - valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: user } } - - name: PGPASSWORD - valueFrom: { secretKeyRef: { name: gssapi-pguser-postgres, key: password } } - - name: PGDATABASE - value: postgres - - name: KRB5_CONFIG - value: /krb5-conf/krb5.conf - volumeMounts: - - name: krb5-conf - mountPath: /krb5-conf - volumes: - - name: krb5-conf - projected: - sources: - - configMap: - name: krb5 - - secret: - name: krb5-keytab diff --git a/testing/kuttl/e2e-other/gssapi/README.md b/testing/kuttl/e2e-other/gssapi/README.md deleted file mode 100644 index 72d8d2b997..0000000000 --- a/testing/kuttl/e2e-other/gssapi/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# GSSAPI Authentication - -This test verifies that it is possible to properly configure PostgreSQL for GSSAPI -authentication. This is done by configuring a PostgresCluster for GSSAPI authentication, -and then utilizing a Kerberos ticket that has been issued by a Kerberos KDC server to log into -PostgreSQL. 
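In concrete terms, the login path exercised by the `psql-connect-gssapi` Job above reduces to two client-side steps. The sketch below is assembled from that Job's definition and assumes the same projected volume (the `krb5` ConfigMap plus the `krb5-keytab` Secret) mounted at `/krb5-conf`:

```bash
# Sketch of the GSSAPI login performed by the psql-connect-gssapi Job.
export KRB5_CONFIG=/krb5-conf/krb5.conf

# Obtain a ticket for the test principal from the keytab (no password prompt)...
kinit -k -t /krb5-conf/krb5.keytab krb5hippo@PGO.CRUNCHYDATA.COM

# ...then connect to the primary; libpq negotiates GSSAPI using that ticket.
psql -U krb5hippo@PGO.CRUNCHYDATA.COM \
  -h "gssapi-primary.${NAMESPACE}.svc.cluster.local" \
  -d postgres -c 'select version();'
```

The `pg_hba` entries in 01-cluster.yaml are what route this principal to `gss` authentication while keeping `scram-sha-256` for the `postgres` user.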
- -## Assumptions - -- A Kerberos Key Distribution Center (KDC) Pod named `krb5-kdc-0` is deployed inside a `krb5` -namespace within the Kubernetes cluster -- The KDC server (`krb5-kdc-0`) contains a `/krb5-scripts/krb5.sh` script that can be run as part -of the test to create the Kerberos principals, keytab Secret, and client configuration needed to -run the test successfully diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/01-valid-upgrade.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/01--valid-upgrade.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/01-valid-upgrade.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/10-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/10--cluster.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/10-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/11-shutdown-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/11--shutdown-cluster.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/11-shutdown-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/12-start-and-update-version.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/12--start-and-update-version.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/12-start-and-update-version.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/13-shutdown-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/13--shutdown-cluster.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/13-shutdown-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/14-annotate-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/14--annotate-cluster.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/14-annotate-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/15-start-cluster.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/15--start-cluster.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/15-start-cluster.yaml diff --git a/testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e-other/major-upgrade-missing-image/17-check-version.yaml similarity index 100% rename from testing/kuttl/e2e-other/major-upgrade-missing-image/17--check-version.yaml rename to testing/kuttl/e2e-other/major-upgrade-missing-image/17-check-version.yaml diff --git a/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml b/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml deleted file mode 100644 index 8dc88788bc..0000000000 --- a/testing/kuttl/e2e-other/postgis-cluster/00--cluster.yaml +++ /dev/null @@
-1,26 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: postgis -spec: - postgresVersion: ${KUTTL_PG_VERSION} - postGISVersion: "${KUTTL_POSTGIS_VERSION}" - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml b/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml deleted file mode 100644 index b0bda7753f..0000000000 --- a/testing/kuttl/e2e-other/postgis-cluster/00-assert.yaml +++ /dev/null @@ -1,24 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: postgis -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: postgis - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: postgis-primary diff --git a/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml b/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml deleted file mode 100644 index 814958a9f6..0000000000 --- a/testing/kuttl/e2e-other/postgis-cluster/01--psql-connect.yaml +++ /dev/null @@ -1,132 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: psql-postgis-connect -spec: - backoffLimit: 6 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - name: PGHOST - valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: host } } - - name: PGPORT - valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: port } } - - name: PGDATABASE - valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: dbname } } - - name: PGUSER - valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: user } } - - name: PGPASSWORD - valueFrom: { secretKeyRef: { name: postgis-pguser-postgis, key: password } } - - { name: GIS_VERSION, value: "${KUTTL_POSTGIS_VERSION}" } - # Do not wait indefinitely. 
- - { name: PGCONNECT_TIMEOUT, value: '5' } - command: - - bash - - -c - - | - # Ensure PostGIS version is set - GIS_VERSION=${KUTTL_POSTGIS_VERSION} - GIS_VERSION=${GIS_VERSION:-notset} - - # check version - RESULT=$(psql -c "DO \$\$ - DECLARE - result boolean; - BEGIN - SELECT postgis_version() LIKE '%${GIS_VERSION}%' INTO result; - ASSERT result = 't', 'PostGIS version incorrect'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - # check full version - RESULT=$(psql -c "DO \$\$ - DECLARE - result boolean; - BEGIN - SELECT postgis_full_version() LIKE 'POSTGIS=\"%${GIS_VERSION}%' INTO result; - ASSERT result = 't', 'PostGIS full version incorrect'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - # check expected schemas (tiger, tiger_data and topology) - # - https://www.postgresql.org/docs/current/catalog-pg-namespace.html - RESULT=$(psql -c "DO \$\$ - DECLARE - result text; - BEGIN - SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger' INTO result; - ASSERT result = 'tiger', 'PostGIS tiger schema missing'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - RESULT=$(psql -c "DO \$\$ - DECLARE - result text; - BEGIN - SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='tiger_data' INTO result; - ASSERT result = 'tiger_data', 'PostGIS tiger_data schema missing'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - RESULT=$(psql -c "DO \$\$ - DECLARE - result text; - BEGIN - SELECT nspname FROM pg_catalog.pg_namespace WHERE nspname='topology' INTO result; - ASSERT result = 'topology', 'PostGIS topology schema missing'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - # check point creation - RESULT=$(psql -c "DO \$\$ - DECLARE - result text; - BEGIN - SELECT pg_typeof(ST_MakePoint(28.385200,-81.563900)) INTO result; - ASSERT result = 'geometry', 'Unable to create PostGIS point'; - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi - - # check GeoJSON function - RESULT=$(psql -c "DO \$\$ - DECLARE - result text; - BEGIN - SELECT ST_AsGeoJSON('SRID=4326;POINT(-118.4079 33.9434)'::geography) INTO result; - ASSERT result = '{\"type\":\"Point\",\"coordinates\":[-118.4079,33.9434]}', FORMAT('GeoJSON check failed, got %L', result); - END \$\$;" 2>&1) - - if [[ "$RESULT" == *"ERROR"* ]]; then - echo "$RESULT" - exit 1 - fi diff --git a/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml b/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml deleted file mode 100644 index 22e9e6f9de..0000000000 --- a/testing/kuttl/e2e-other/postgis-cluster/01-assert.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: batch/v1 -kind: Job -metadata: - name: psql-postgis-connect -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml deleted file mode 100644 index 725f40de14..0000000000 --- a/testing/kuttl/e2e-other/replica-service/00-base-cluster.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/base-cluster.yaml -assert: -- files/base-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/01-node-port.yaml b/testing/kuttl/e2e-other/replica-service/01-node-port.yaml deleted file mode 100644 index c80e947e40..0000000000 --- 
a/testing/kuttl/e2e-other/replica-service/01-node-port.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/np-cluster.yaml -assert: -- files/np-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml b/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml deleted file mode 100644 index f1433111db..0000000000 --- a/testing/kuttl/e2e-other/replica-service/02-loadbalancer.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/lb-cluster.yaml -assert: -- files/lb-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml b/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml deleted file mode 100644 index de6055ea6b..0000000000 --- a/testing/kuttl/e2e-other/replica-service/03-cluster-ip.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/cip-cluster.yaml -assert: -- files/cip-check.yaml diff --git a/testing/kuttl/e2e-other/replica-service/files/base-check.yaml b/testing/kuttl/e2e-other/replica-service/files/base-check.yaml deleted file mode 100644 index a83fce0f57..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/base-check.yaml +++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: service -status: - instances: - - name: instance1 - readyReplicas: 2 - replicas: 2 - updatedReplicas: 2 ---- -apiVersion: v1 -kind: Service -metadata: - name: service-replicas diff --git a/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml deleted file mode 100644 index 67c4481d2f..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/base-cluster.yaml +++ /dev/null @@ -1,28 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: service -spec: - postgresVersion: ${KUTTL_PG_VERSION} - replicaService: - type: ClusterIP - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 0.5Gi - replicas: 2 - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 0.5Gi diff --git a/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml deleted file mode 100644 index 5bf5422bb8..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/cip-check.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: service-replicas -spec: - type: ClusterIP - selector: - postgres-operator.crunchydata.com/cluster: service - postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml deleted file mode 100644 index 8545aa8223..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/cip-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: service -spec: - replicaService: - type: ClusterIP - nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml deleted file mode 100644 index b8519491c7..0000000000 --- 
a/testing/kuttl/e2e-other/replica-service/files/lb-check.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: service-replicas -spec: - type: LoadBalancer - selector: - postgres-operator.crunchydata.com/cluster: service - postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml deleted file mode 100644 index 5e18f71dcd..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/lb-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: service -spec: - replicaService: - type: LoadBalancer - nodePort: null diff --git a/testing/kuttl/e2e-other/replica-service/files/np-check.yaml b/testing/kuttl/e2e-other/replica-service/files/np-check.yaml deleted file mode 100644 index c7d791e36a..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/np-check.yaml +++ /dev/null @@ -1,14 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: service-replicas -spec: - type: NodePort - ports: - - name: postgres - port: 5432 - protocol: TCP - targetPort: postgres - selector: - postgres-operator.crunchydata.com/cluster: service - postgres-operator.crunchydata.com/role: replica diff --git a/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml b/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml deleted file mode 100644 index 0b20ae63ad..0000000000 --- a/testing/kuttl/e2e-other/replica-service/files/np-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: service -spec: - replicaService: - type: NodePort diff --git a/testing/kuttl/e2e-other/resize-volume/00-assert.yaml b/testing/kuttl/e2e-other/resize-volume/00-assert.yaml deleted file mode 100644 index b4372b75e7..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/00-assert.yaml +++ /dev/null @@ -1,7 +0,0 @@ -# Ensure that the default StorageClass supports VolumeExpansion -apiVersion: storage.k8s.io/v1 -kind: StorageClass -metadata: - annotations: - storageclass.kubernetes.io/is-default-class: "true" -allowVolumeExpansion: true diff --git a/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml b/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml deleted file mode 100644 index 4737fb25f4..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/01--cluster.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-up -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/resize-volume/01-assert.yaml b/testing/kuttl/e2e-other/resize-volume/01-assert.yaml deleted file mode 100644 index ea72af469c..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/01-assert.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-up -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: 
resize-volume-up - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: resize-volume-up-primary ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-up - postgres-operator.crunchydata.com/instance-set: instance1 -spec: - resources: - requests: - storage: 1Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-up - postgres-operator.crunchydata.com/data: pgbackrest - postgres-operator.crunchydata.com/pgbackrest-repo: repo1 -spec: - resources: - requests: - storage: 1Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 1Gi - phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml b/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml deleted file mode 100644 index c41a6f80c4..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/02--create-data.yaml +++ /dev/null @@ -1,31 +0,0 @@ ---- -# Create some data that should be present after resizing. -apiVersion: batch/v1 -kind: Job -metadata: - name: create-data - labels: { postgres-operator-test: kuttl } -spec: - backoffLimit: 3 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - name: PGURI - valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } - - # Do not wait indefinitely. - - { name: PGCONNECT_TIMEOUT, value: '5' } - - command: - - psql - - $(PGURI) - - --set=ON_ERROR_STOP=1 - - --command - - | - CREATE TABLE important (data) AS VALUES ('treasure'); diff --git a/testing/kuttl/e2e-other/resize-volume/02-assert.yaml b/testing/kuttl/e2e-other/resize-volume/02-assert.yaml deleted file mode 100644 index fdb42e68f5..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/02-assert.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: create-data -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/resize-volume/03--resize.yaml b/testing/kuttl/e2e-other/resize-volume/03--resize.yaml deleted file mode 100644 index dd7c96901f..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/03--resize.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-up -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 2Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 2Gi diff --git a/testing/kuttl/e2e-other/resize-volume/03-assert.yaml b/testing/kuttl/e2e-other/resize-volume/03-assert.yaml deleted file mode 100644 index 11aa230cd4..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/03-assert.yaml +++ /dev/null @@ -1,37 +0,0 @@ -# We know that the PVC sizes have changed, so now we can check that they have been -# updated to have the expected size ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-up - postgres-operator.crunchydata.com/instance-set: instance1 -spec: - resources: - requests: - storage: 2Gi
-status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-up - postgres-operator.crunchydata.com/data: pgbackrest - postgres-operator.crunchydata.com/pgbackrest-repo: repo1 -spec: - resources: - requests: - storage: 2Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml b/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml deleted file mode 100644 index 682a46ef4d..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/06--check-data.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -# Confirm that all the data still exists. -apiVersion: batch/v1 -kind: Job -metadata: - name: check-data - labels: { postgres-operator-test: kuttl } -spec: - backoffLimit: 3 - template: - metadata: - labels: { postgres-operator-test: kuttl } - spec: - restartPolicy: Never - containers: - - name: psql - image: ${KUTTL_PSQL_IMAGE} - env: - - name: PGURI - valueFrom: { secretKeyRef: { name: resize-volume-up-pguser-resize-volume-up, key: uri } } - - # Do not wait indefinitely. - - { name: PGCONNECT_TIMEOUT, value: '5' } - - # Confirm that all the data still exists. - # Note: the `$$$$` is reduced to `$$` by Kubernetes. - # - https://kubernetes.io/docs/tasks/inject-data-application/ - command: - - psql - - $(PGURI) - - --set=ON_ERROR_STOP=1 - - --command - - | - DO $$$$ - DECLARE - keep_data jsonb; - BEGIN - SELECT jsonb_agg(important) INTO keep_data FROM important; - ASSERT keep_data = '[{"data":"treasure"}]', format('got %L', keep_data); - END $$$$; diff --git a/testing/kuttl/e2e-other/resize-volume/06-assert.yaml b/testing/kuttl/e2e-other/resize-volume/06-assert.yaml deleted file mode 100644 index cf743b8701..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/06-assert.yaml +++ /dev/null @@ -1,7 +0,0 @@ ---- -apiVersion: batch/v1 -kind: Job -metadata: - name: check-data -status: - succeeded: 1 diff --git a/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml b/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml deleted file mode 100644 index 8d2d602ca6..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/11--cluster.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-down -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 2Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 2Gi diff --git a/testing/kuttl/e2e-other/resize-volume/11-assert.yaml b/testing/kuttl/e2e-other/resize-volume/11-assert.yaml deleted file mode 100644 index 666b4a85c7..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/11-assert.yaml +++ /dev/null @@ -1,59 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-down -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: batch/v1 -kind: Job -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-down - postgres-operator.crunchydata.com/pgbackrest-backup: replica-create -status: - succeeded: 1 ---- -apiVersion: v1 -kind: Service -metadata: - name: 
resize-volume-down-primary ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-down - postgres-operator.crunchydata.com/instance-set: instance1 -spec: - resources: - requests: - storage: 2Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-down - postgres-operator.crunchydata.com/data: pgbackrest - postgres-operator.crunchydata.com/pgbackrest-repo: repo1 -spec: - resources: - requests: - storage: 2Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound diff --git a/testing/kuttl/e2e-other/resize-volume/13--resize.yaml b/testing/kuttl/e2e-other/resize-volume/13--resize.yaml deleted file mode 100644 index 77af2f2aa3..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/13--resize.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: resize-volume-down -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - backups: - pgbackrest: - repos: - - name: repo1 - volume: - volumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e-other/resize-volume/13-assert.yaml b/testing/kuttl/e2e-other/resize-volume/13-assert.yaml deleted file mode 100644 index 4210214fd6..0000000000 --- a/testing/kuttl/e2e-other/resize-volume/13-assert.yaml +++ /dev/null @@ -1,43 +0,0 @@ -apiVersion: v1 -kind: Event -type: Warning -involvedObject: - apiVersion: postgres-operator.crunchydata.com/v1beta1 - kind: PostgresCluster - name: resize-volume-down -reason: PersistentVolumeError ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-down - postgres-operator.crunchydata.com/instance-set: instance1 -spec: - resources: - requests: - storage: 2Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound ---- -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - labels: - postgres-operator.crunchydata.com/cluster: resize-volume-down - postgres-operator.crunchydata.com/data: pgbackrest - postgres-operator.crunchydata.com/pgbackrest-repo: repo1 -spec: - resources: - requests: - storage: 2Gi -status: - accessModes: - - ReadWriteOnce - capacity: - storage: 2Gi - phase: Bound diff --git a/testing/kuttl/e2e/cluster-pause/00--cluster.yaml b/testing/kuttl/e2e/cluster-pause/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/cluster-pause/00--cluster.yaml rename to testing/kuttl/e2e/cluster-pause/00-cluster.yaml diff --git a/testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml b/testing/kuttl/e2e/cluster-pause/01-cluster-paused.yaml similarity index 100% rename from testing/kuttl/e2e/cluster-pause/01--cluster-paused.yaml rename to testing/kuttl/e2e/cluster-pause/01-cluster-paused.yaml diff --git a/testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml b/testing/kuttl/e2e/cluster-pause/02-cluster-resume.yaml similarity index 100% rename from testing/kuttl/e2e/cluster-pause/02--cluster-resume.yaml rename to testing/kuttl/e2e/cluster-pause/02-cluster-resume.yaml diff --git a/testing/kuttl/e2e/cluster-start/00--cluster.yaml b/testing/kuttl/e2e/cluster-start/00-cluster.yaml similarity 
index 100% rename from testing/kuttl/e2e/cluster-start/00--cluster.yaml rename to testing/kuttl/e2e/cluster-start/00-cluster.yaml diff --git a/testing/kuttl/e2e/cluster-start/01--connect.yaml b/testing/kuttl/e2e/cluster-start/01-connect.yaml similarity index 100% rename from testing/kuttl/e2e/cluster-start/01--connect.yaml rename to testing/kuttl/e2e/cluster-start/01-connect.yaml diff --git a/testing/kuttl/e2e/delete-namespace/README.md b/testing/kuttl/e2e/delete-namespace/README.md index 697e2ae915..4b0f951fef 100644 --- a/testing/kuttl/e2e/delete-namespace/README.md +++ b/testing/kuttl/e2e/delete-namespace/README.md @@ -6,6 +6,6 @@ * Check that nothing remains. Note: KUTTL provides a `$NAMESPACE` var that can be used in scripts/commands, -but which cannot be used in object definition yamls (like `01--cluster.yaml`). +but which cannot be used in object definition yamls (like `01-cluster.yaml`). Therefore, we use a given, non-random namespace that is defined in the makefile and generated with `generate-kuttl`. diff --git a/testing/kuttl/e2e/exporter-custom-queries/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-custom-queries/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-custom-queries/00--create-cluster.yaml rename to testing/kuttl/e2e/exporter-custom-queries/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/exporter-custom-queries/01--change-custom-queries.yaml b/testing/kuttl/e2e/exporter-custom-queries/01-change-custom-queries.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-custom-queries/01--change-custom-queries.yaml rename to testing/kuttl/e2e/exporter-custom-queries/01-change-custom-queries.yaml diff --git a/testing/kuttl/e2e/exporter-custom-queries/README.md b/testing/kuttl/e2e/exporter-custom-queries/README.md index 801b6d02a8..6ec7462628 100644 --- a/testing/kuttl/e2e/exporter-custom-queries/README.md +++ b/testing/kuttl/e2e/exporter-custom-queries/README.md @@ -1,3 +1,3 @@ # Exporter -**Note**: This series of tests depends on PGO being deployed with the `AppendCustomQueries` feature gate OFF. There is a separate set of tests in `e2e-other` that tests the `AppendCustomQueries` functionality. +**Note**: This series of tests depends on PGO being deployed with the `AppendCustomQueries` feature gate OFF. 
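Since the note above hinges on how PGO was deployed, a minimal sketch of toggling the gate may help; it assumes a v5-style operator Deployment named `pgo` in a `postgres-operator` namespace that reads feature gates from the `PGO_FEATURE_GATES` environment variable (names are assumptions; adjust to the local install):

```bash
# Sketch: ensure the AppendCustomQueries feature gate is off before running
# this test series. The Deployment name and namespace are assumptions.
kubectl set env deployment/pgo --namespace postgres-operator \
  PGO_FEATURE_GATES="AppendCustomQueries=false"

# Confirm the value the operator container will see.
kubectl get deployment/pgo --namespace postgres-operator --output \
  jsonpath='{.spec.template.spec.containers[0].env[?(@.name=="PGO_FEATURE_GATES")].value}'
```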
diff --git a/testing/kuttl/e2e/exporter-no-tls/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-no-tls/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-no-tls/00--create-cluster.yaml rename to testing/kuttl/e2e/exporter-no-tls/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/exporter-password-change/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-password-change/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-password-change/00--create-cluster.yaml rename to testing/kuttl/e2e/exporter-password-change/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/exporter-password-change/02--change-password.yaml b/testing/kuttl/e2e/exporter-password-change/02-change-password.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-password-change/02--change-password.yaml rename to testing/kuttl/e2e/exporter-password-change/02-change-password.yaml diff --git a/testing/kuttl/e2e/exporter-password-change/README.md b/testing/kuttl/e2e/exporter-password-change/README.md index 2a5b596309..d3d11f263c 100644 --- a/testing/kuttl/e2e/exporter-password-change/README.md +++ b/testing/kuttl/e2e/exporter-password-change/README.md @@ -1,6 +1,6 @@ # Exporter Password Change -## 00--create-cluster: +## 00-create-cluster: The TestStep will: 1) Apply the `files/inital-postgrescluster.yaml` file to create a cluster with monitoring enabled @@ -13,7 +13,7 @@ The TestStep will: This TestAssert will loop through a script until: 1) the instance pod has the `ContainersReady` condition with status `true` -2) the asserts from `00--create-cluster` are met. +2) the asserts from `00-create-cluster` are met. ## 01-assert: diff --git a/testing/kuttl/e2e/exporter-tls/00--create-cluster.yaml b/testing/kuttl/e2e/exporter-tls/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/exporter-tls/00--create-cluster.yaml rename to testing/kuttl/e2e/exporter-tls/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/01-valid-upgrade.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/01--valid-upgrade.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/01-valid-upgrade.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/10-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/10--cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/10-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/11-shutdown-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/11--shutdown-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/11-shutdown-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/12-start-and-update-version.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/12--start-and-update-version.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/12-start-and-update-version.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/13-shutdown-cluster.yaml similarity index 100% rename from 
testing/kuttl/e2e/major-upgrade-missing-image/13--shutdown-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/13-shutdown-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/14-annotate-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/14--annotate-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/14-annotate-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/15-start-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/15--start-cluster.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/15-start-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml b/testing/kuttl/e2e/major-upgrade-missing-image/17-check-version.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade-missing-image/17--check-version.yaml rename to testing/kuttl/e2e/major-upgrade-missing-image/17-check-version.yaml diff --git a/testing/kuttl/e2e/major-upgrade-missing-image/README.md b/testing/kuttl/e2e/major-upgrade-missing-image/README.md index 341cc854f7..b148868cee 100644 --- a/testing/kuttl/e2e/major-upgrade-missing-image/README.md +++ b/testing/kuttl/e2e/major-upgrade-missing-image/README.md @@ -6,31 +6,31 @@ PostgresCluster spec or via the RELATED_IMAGES environment variables. ### Basic PGUpgrade controller and CRD instance validation -* 01--valid-upgrade: create a valid PGUpgrade instance +* 01-valid-upgrade: create a valid PGUpgrade instance * 01-assert: check that the PGUpgrade instance exists and has the expected status ### Verify new statuses for missing required container images -* 10--cluster: create the cluster with an unavailable image (i.e. Postgres 10) +* 10-cluster: create the cluster with an unavailable image (i.e. 
Postgres 10) * 10-assert: check that the PGUpgrade instance has the expected reason: "PGClusterNotShutdown" * 11-shutdown-cluster: set the spec.shutdown value to 'true' as required for upgrade * 11-assert: check that the new reason is set, "PGClusterPrimaryNotIdentified" ### Update to an available Postgres version, start and upgrade PostgresCluster -* 12--start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false +* 12-start-and-update-version: update the Postgres version on both CRD instances and set 'shutdown' to false * 12-assert: verify that the cluster is running and the PGUpgrade instance now has the new status info with reason: "PGClusterNotShutdown" -* 13--shutdown-cluster: set spec.shutdown to 'true' +* 13-shutdown-cluster: set spec.shutdown to 'true' * 13-assert: check that the PGUpgrade instance has the expected reason: "PGClusterMissingRequiredAnnotation" -* 14--annotate-cluster: set the required annotation +* 14-annotate-cluster: set the required annotation * 14-assert: verify that the upgrade succeeded and the new Postgres version shows in the cluster's status -* 15--start-cluster: set the new Postgres version and spec.shutdown to 'false' +* 15-start-cluster: set the new Postgres version and spec.shutdown to 'false' ### Verify upgraded PostgresCluster * 15-assert: verify that the cluster is running * 16-check-pgbackrest: check that the pgbackrest setup has successfully completed -* 17--check-version: check the version reported by PostgreSQL +* 17-check-version: check the version reported by PostgreSQL * 17-assert: assert the Job from the previous step succeeded diff --git a/testing/kuttl/e2e/major-upgrade/01--invalid-pgupgrade.yaml b/testing/kuttl/e2e/major-upgrade/01-invalid-pgupgrade.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/01--invalid-pgupgrade.yaml rename to testing/kuttl/e2e/major-upgrade/01-invalid-pgupgrade.yaml diff --git a/testing/kuttl/e2e/major-upgrade/02--valid-upgrade.yaml b/testing/kuttl/e2e/major-upgrade/02-valid-upgrade.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/02--valid-upgrade.yaml rename to testing/kuttl/e2e/major-upgrade/02-valid-upgrade.yaml diff --git a/testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml b/testing/kuttl/e2e/major-upgrade/10-already-updated-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/10--already-updated-cluster.yaml rename to testing/kuttl/e2e/major-upgrade/10-already-updated-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade/20--cluster-with-invalid-version.yaml b/testing/kuttl/e2e/major-upgrade/20-cluster-with-invalid-version.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/20--cluster-with-invalid-version.yaml rename to testing/kuttl/e2e/major-upgrade/20-cluster-with-invalid-version.yaml diff --git a/testing/kuttl/e2e/major-upgrade/30--cluster.yaml b/testing/kuttl/e2e/major-upgrade/30-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/30--cluster.yaml rename to testing/kuttl/e2e/major-upgrade/30-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade/31--create-data.yaml b/testing/kuttl/e2e/major-upgrade/31-create-data.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/31--create-data.yaml rename to testing/kuttl/e2e/major-upgrade/31-create-data.yaml diff --git a/testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml b/testing/kuttl/e2e/major-upgrade/32-shutdown-cluster.yaml similarity index 100% rename 
from testing/kuttl/e2e/major-upgrade/32--shutdown-cluster.yaml rename to testing/kuttl/e2e/major-upgrade/32-shutdown-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml b/testing/kuttl/e2e/major-upgrade/33-annotate-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/33--annotate-cluster.yaml rename to testing/kuttl/e2e/major-upgrade/33-annotate-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml b/testing/kuttl/e2e/major-upgrade/34-restart-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/34--restart-cluster.yaml rename to testing/kuttl/e2e/major-upgrade/34-restart-cluster.yaml diff --git a/testing/kuttl/e2e/major-upgrade/36--check-data-and-version.yaml b/testing/kuttl/e2e/major-upgrade/36-check-data-and-version.yaml similarity index 100% rename from testing/kuttl/e2e/major-upgrade/36--check-data-and-version.yaml rename to testing/kuttl/e2e/major-upgrade/36-check-data-and-version.yaml diff --git a/testing/kuttl/e2e/optional-backups/00--cluster.yaml b/testing/kuttl/e2e/optional-backups/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/optional-backups/00--cluster.yaml rename to testing/kuttl/e2e/optional-backups/00-cluster.yaml diff --git a/testing/kuttl/e2e/optional-backups/04--cluster.yaml b/testing/kuttl/e2e/optional-backups/04-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/optional-backups/04--cluster.yaml rename to testing/kuttl/e2e/optional-backups/04-cluster.yaml diff --git a/testing/kuttl/e2e/optional-backups/10--cluster.yaml b/testing/kuttl/e2e/optional-backups/10-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/optional-backups/10--cluster.yaml rename to testing/kuttl/e2e/optional-backups/10-cluster.yaml diff --git a/testing/kuttl/e2e/optional-backups/20--cluster.yaml b/testing/kuttl/e2e/optional-backups/20-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/optional-backups/20--cluster.yaml rename to testing/kuttl/e2e/optional-backups/20-cluster.yaml diff --git a/testing/kuttl/e2e/optional-backups/22--cluster.yaml b/testing/kuttl/e2e/optional-backups/22-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/optional-backups/22--cluster.yaml rename to testing/kuttl/e2e/optional-backups/22-cluster.yaml diff --git a/testing/kuttl/e2e/password-change/00--cluster.yaml b/testing/kuttl/e2e/password-change/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/00--cluster.yaml rename to testing/kuttl/e2e/password-change/00-cluster.yaml diff --git a/testing/kuttl/e2e/password-change/01--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/01-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/01--psql-connect-uri.yaml rename to testing/kuttl/e2e/password-change/01-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/01--psql-connect.yaml b/testing/kuttl/e2e/password-change/01-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/01--psql-connect.yaml rename to testing/kuttl/e2e/password-change/01-psql-connect.yaml diff --git a/testing/kuttl/e2e/password-change/02--secret.yaml b/testing/kuttl/e2e/password-change/02-secret.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/02--secret.yaml rename to testing/kuttl/e2e/password-change/02-secret.yaml diff --git a/testing/kuttl/e2e/password-change/03--psql-connect-uri.yaml 
b/testing/kuttl/e2e/password-change/03-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/03--psql-connect-uri.yaml rename to testing/kuttl/e2e/password-change/03-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/03--psql-connect.yaml b/testing/kuttl/e2e/password-change/03-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/03--psql-connect.yaml rename to testing/kuttl/e2e/password-change/03-psql-connect.yaml diff --git a/testing/kuttl/e2e/password-change/04--secret.yaml b/testing/kuttl/e2e/password-change/04-secret.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/04--secret.yaml rename to testing/kuttl/e2e/password-change/04-secret.yaml diff --git a/testing/kuttl/e2e/password-change/05--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/05-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/05--psql-connect-uri.yaml rename to testing/kuttl/e2e/password-change/05-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/05--psql-connect.yaml b/testing/kuttl/e2e/password-change/05-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/05--psql-connect.yaml rename to testing/kuttl/e2e/password-change/05-psql-connect.yaml diff --git a/testing/kuttl/e2e/password-change/06--cluster.yaml b/testing/kuttl/e2e/password-change/06-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/06--cluster.yaml rename to testing/kuttl/e2e/password-change/06-cluster.yaml diff --git a/testing/kuttl/e2e/password-change/07--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/07-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/07--psql-connect-uri.yaml rename to testing/kuttl/e2e/password-change/07-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/07--psql-connect.yaml b/testing/kuttl/e2e/password-change/07-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/07--psql-connect.yaml rename to testing/kuttl/e2e/password-change/07-psql-connect.yaml diff --git a/testing/kuttl/e2e/password-change/08--secret.yaml b/testing/kuttl/e2e/password-change/08-secret.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/08--secret.yaml rename to testing/kuttl/e2e/password-change/08-secret.yaml diff --git a/testing/kuttl/e2e/password-change/09--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/09-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/09--psql-connect-uri.yaml rename to testing/kuttl/e2e/password-change/09-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/09--psql-connect.yaml b/testing/kuttl/e2e/password-change/09-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/09--psql-connect.yaml rename to testing/kuttl/e2e/password-change/09-psql-connect.yaml diff --git a/testing/kuttl/e2e/password-change/10--secret.yaml b/testing/kuttl/e2e/password-change/10-secret.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/10--secret.yaml rename to testing/kuttl/e2e/password-change/10-secret.yaml diff --git a/testing/kuttl/e2e/password-change/11--psql-connect-uri.yaml b/testing/kuttl/e2e/password-change/11-psql-connect-uri.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/11--psql-connect-uri.yaml rename to 
testing/kuttl/e2e/password-change/11-psql-connect-uri.yaml diff --git a/testing/kuttl/e2e/password-change/11--psql-connect.yaml b/testing/kuttl/e2e/password-change/11-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/password-change/11--psql-connect.yaml rename to testing/kuttl/e2e/password-change/11-psql-connect.yaml diff --git a/testing/kuttl/e2e/pgadmin/01--cluster.yaml b/testing/kuttl/e2e/pgadmin/01-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgadmin/01--cluster.yaml rename to testing/kuttl/e2e/pgadmin/01-cluster.yaml diff --git a/testing/kuttl/e2e/pgadmin/02--check-settings.yaml b/testing/kuttl/e2e/pgadmin/02-check-settings.yaml similarity index 100% rename from testing/kuttl/e2e/pgadmin/02--check-settings.yaml rename to testing/kuttl/e2e/pgadmin/02-check-settings.yaml diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-backup-standby/00--cluster.yaml rename to testing/kuttl/e2e/pgbackrest-backup-standby/00-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/01-check-backup-logs.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-backup-standby/01--check-backup-logs.yaml rename to testing/kuttl/e2e/pgbackrest-backup-standby/01-check-backup-logs.yaml diff --git a/testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-backup-standby/02-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-backup-standby/02--cluster.yaml rename to testing/kuttl/e2e/pgbackrest-backup-standby/02-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-init/00--cluster.yaml rename to testing/kuttl/e2e/pgbackrest-init/00-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-init/02--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/02-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-init/02--cluster.yaml rename to testing/kuttl/e2e/pgbackrest-init/02-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml b/testing/kuttl/e2e/pgbackrest-init/04-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-init/04--cluster.yaml rename to testing/kuttl/e2e/pgbackrest-init/04-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml b/testing/kuttl/e2e/pgbackrest-init/06-check-spool-path.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-init/06--check-spool-path.yaml rename to testing/kuttl/e2e/pgbackrest-init/06-check-spool-path.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/01--create-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/01-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/01--create-cluster.yaml rename to testing/kuttl/e2e/pgbackrest-restore/01-create-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/02--create-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/02-create-data.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/02--create-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/02-create-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/03--backup.yaml 
b/testing/kuttl/e2e/pgbackrest-restore/03-backup.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/03--backup.yaml rename to testing/kuttl/e2e/pgbackrest-restore/03-backup.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/04--clone-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/04-clone-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/04--clone-cluster.yaml rename to testing/kuttl/e2e/pgbackrest-restore/04-clone-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/05--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/05-check-data.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/05--check-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/05-check-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/06--delete-clone.yaml b/testing/kuttl/e2e/pgbackrest-restore/06-delete-clone.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/06--delete-clone.yaml rename to testing/kuttl/e2e/pgbackrest-restore/06-delete-clone.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/07--annotate.yaml b/testing/kuttl/e2e/pgbackrest-restore/07-annotate.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/07--annotate.yaml rename to testing/kuttl/e2e/pgbackrest-restore/07-annotate.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/07--update-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/07-update-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/07--update-cluster.yaml rename to testing/kuttl/e2e/pgbackrest-restore/07-update-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/08--wait-restart.yaml b/testing/kuttl/e2e/pgbackrest-restore/08-wait-restart.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/08--wait-restart.yaml rename to testing/kuttl/e2e/pgbackrest-restore/08-wait-restart.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/09--add-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/09-add-data.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/09--add-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/09-add-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml b/testing/kuttl/e2e/pgbackrest-restore/10-wait-archived.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/10--wait-archived.yaml rename to testing/kuttl/e2e/pgbackrest-restore/10-wait-archived.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/11--clone-cluster.yaml b/testing/kuttl/e2e/pgbackrest-restore/11-clone-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/11--clone-cluster.yaml rename to testing/kuttl/e2e/pgbackrest-restore/11-clone-cluster.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/12--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/12-check-data.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/12--check-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/12-check-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/13--delete-clone.yaml b/testing/kuttl/e2e/pgbackrest-restore/13-delete-clone.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/13--delete-clone.yaml rename to testing/kuttl/e2e/pgbackrest-restore/13-delete-clone.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/14-lose-data.yaml similarity index 100% rename 
from testing/kuttl/e2e/pgbackrest-restore/14--lose-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/14-lose-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/15--in-place-pitr.yaml b/testing/kuttl/e2e/pgbackrest-restore/15-in-place-pitr.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/15--in-place-pitr.yaml rename to testing/kuttl/e2e/pgbackrest-restore/15-in-place-pitr.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/16--check-data.yaml b/testing/kuttl/e2e/pgbackrest-restore/16-check-data.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/16--check-data.yaml rename to testing/kuttl/e2e/pgbackrest-restore/16-check-data.yaml diff --git a/testing/kuttl/e2e/pgbackrest-restore/17--check-replication.yaml b/testing/kuttl/e2e/pgbackrest-restore/17-check-replication.yaml similarity index 100% rename from testing/kuttl/e2e/pgbackrest-restore/17--check-replication.yaml rename to testing/kuttl/e2e/pgbackrest-restore/17-check-replication.yaml diff --git a/testing/kuttl/e2e/pgbouncer/00--cluster.yaml b/testing/kuttl/e2e/pgbouncer/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/00--cluster.yaml rename to testing/kuttl/e2e/pgbouncer/00-cluster.yaml diff --git a/testing/kuttl/e2e/pgbouncer/01--psql-connect.yaml b/testing/kuttl/e2e/pgbouncer/01-psql-connect.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/01--psql-connect.yaml rename to testing/kuttl/e2e/pgbouncer/01-psql-connect.yaml diff --git a/testing/kuttl/e2e/pgbouncer/10--read-certificate.yaml b/testing/kuttl/e2e/pgbouncer/10-read-certificate.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/10--read-certificate.yaml rename to testing/kuttl/e2e/pgbouncer/10-read-certificate.yaml diff --git a/testing/kuttl/e2e/pgbouncer/11--open-connection.yaml b/testing/kuttl/e2e/pgbouncer/11-open-connection.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/11--open-connection.yaml rename to testing/kuttl/e2e/pgbouncer/11-open-connection.yaml diff --git a/testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml b/testing/kuttl/e2e/pgbouncer/12-rotate-certificate.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/12--rotate-certificate.yaml rename to testing/kuttl/e2e/pgbouncer/12-rotate-certificate.yaml diff --git a/testing/kuttl/e2e/pgbouncer/13--read-certificate.yaml b/testing/kuttl/e2e/pgbouncer/13-read-certificate.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/13--read-certificate.yaml rename to testing/kuttl/e2e/pgbouncer/13-read-certificate.yaml diff --git a/testing/kuttl/e2e/pgbouncer/14--compare-certificate.yaml b/testing/kuttl/e2e/pgbouncer/14-compare-certificate.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/14--compare-certificate.yaml rename to testing/kuttl/e2e/pgbouncer/14-compare-certificate.yaml diff --git a/testing/kuttl/e2e/pgbouncer/15--check-connection.yaml b/testing/kuttl/e2e/pgbouncer/15-check-connection.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/15--check-connection.yaml rename to testing/kuttl/e2e/pgbouncer/15-check-connection.yaml diff --git a/testing/kuttl/e2e/pgbouncer/16--reconnect.yaml b/testing/kuttl/e2e/pgbouncer/16-reconnect.yaml similarity index 100% rename from testing/kuttl/e2e/pgbouncer/16--reconnect.yaml rename to testing/kuttl/e2e/pgbouncer/16-reconnect.yaml diff --git a/testing/kuttl/e2e/replica-read/00--cluster.yaml b/testing/kuttl/e2e/replica-read/00-cluster.yaml similarity index 100% rename 
from testing/kuttl/e2e/replica-read/00--cluster.yaml rename to testing/kuttl/e2e/replica-read/00-cluster.yaml diff --git a/testing/kuttl/e2e/replica-read/01--psql-replica-read.yaml b/testing/kuttl/e2e/replica-read/01-psql-replica-read.yaml similarity index 100% rename from testing/kuttl/e2e/replica-read/01--psql-replica-read.yaml rename to testing/kuttl/e2e/replica-read/01-psql-replica-read.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml b/testing/kuttl/e2e/root-cert-ownership/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/00--cluster.yaml rename to testing/kuttl/e2e/root-cert-ownership/00-cluster.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/01-check-owners.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/01--check-owners.yaml rename to testing/kuttl/e2e/root-cert-ownership/01-check-owners.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/02--delete-owner1.yaml b/testing/kuttl/e2e/root-cert-ownership/02-delete-owner1.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/02--delete-owner1.yaml rename to testing/kuttl/e2e/root-cert-ownership/02-delete-owner1.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml b/testing/kuttl/e2e/root-cert-ownership/03-check-owners.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/03--check-owners.yaml rename to testing/kuttl/e2e/root-cert-ownership/03-check-owners.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/04--delete-owner2.yaml b/testing/kuttl/e2e/root-cert-ownership/04-delete-owner2.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/04--delete-owner2.yaml rename to testing/kuttl/e2e/root-cert-ownership/04-delete-owner2.yaml diff --git a/testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml b/testing/kuttl/e2e/root-cert-ownership/05-check-secret.yaml similarity index 100% rename from testing/kuttl/e2e/root-cert-ownership/05--check-secret.yaml rename to testing/kuttl/e2e/root-cert-ownership/05-check-secret.yaml diff --git a/testing/kuttl/e2e/scaledown/00--create-cluster.yaml b/testing/kuttl/e2e/scaledown/00-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/00--create-cluster.yaml rename to testing/kuttl/e2e/scaledown/00-create-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/01--update-cluster.yaml b/testing/kuttl/e2e/scaledown/01-update-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/01--update-cluster.yaml rename to testing/kuttl/e2e/scaledown/01-update-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/02--delete-cluster.yaml b/testing/kuttl/e2e/scaledown/02-delete-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/02--delete-cluster.yaml rename to testing/kuttl/e2e/scaledown/02-delete-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/10--create-cluster.yaml b/testing/kuttl/e2e/scaledown/10-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/10--create-cluster.yaml rename to testing/kuttl/e2e/scaledown/10-create-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/12--update-cluster.yaml b/testing/kuttl/e2e/scaledown/12-update-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/12--update-cluster.yaml rename to testing/kuttl/e2e/scaledown/12-update-cluster.yaml diff --git 
a/testing/kuttl/e2e/scaledown/13--delete-cluster.yaml b/testing/kuttl/e2e/scaledown/13-delete-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/13--delete-cluster.yaml rename to testing/kuttl/e2e/scaledown/13-delete-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/20--create-cluster.yaml b/testing/kuttl/e2e/scaledown/20-create-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/20--create-cluster.yaml rename to testing/kuttl/e2e/scaledown/20-create-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/21--update-cluster.yaml b/testing/kuttl/e2e/scaledown/21-update-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/scaledown/21--update-cluster.yaml rename to testing/kuttl/e2e/scaledown/21-update-cluster.yaml diff --git a/testing/kuttl/e2e/scaledown/readme.MD b/testing/kuttl/e2e/scaledown/readme.MD index 44fd880ed1..dd7f8fed7e 100644 --- a/testing/kuttl/e2e/scaledown/readme.MD +++ b/testing/kuttl/e2e/scaledown/readme.MD @@ -8,24 +8,24 @@ have the expected number of pods. ### From two sets to one set -* 00--create-cluster: create the cluster with two instance sets, one replica each +* 00-create-cluster: create the cluster with two instance sets, one replica each * 00-assert: check that the cluster exists with the expected status -* 01--update-cluster: update the cluster to remove one instance set +* 01-update-cluster: update the cluster to remove one instance set * 01-assert: check that the cluster exists with the expected status -* 02--delete-cluster +* 02-delete-cluster ### From one set with multiple replicas to one set with one replica -* 10--create-cluster: create the cluster with one instance set with two replicas +* 10-create-cluster: create the cluster with one instance set with two replicas * 10-assert: check that the cluster exists with the expected status * 11-annotate: set the roles as labels on the pods -* 12--update-cluster: update the cluster to remove one replica +* 12-update-cluster: update the cluster to remove one replica * 12-assert: check that the cluster exists with the expected status; and that the surviving `master` pod was the `master` before the scaledown -* 13--delete-cluster: delete the cluster +* 13-delete-cluster: delete the cluster ### From two sets with variable replicas to two sets with one replica each -* 20--create-cluster: create the cluster with two instance sets, with two and one replica +* 20-create-cluster: create the cluster with two instance sets, with two and one replica * 20-assert: check that the cluster exists with the expected status -* 21--update-cluster: update the cluster to reduce the two-replica instance to one-replica +* 21-update-cluster: update the cluster to reduce the two-replica instance to one replica * 21-assert: check that the cluster exists with the expected status diff --git a/testing/kuttl/e2e/security-context/00--cluster.yaml b/testing/kuttl/e2e/security-context/00-cluster.yaml similarity index 100% rename from testing/kuttl/e2e/security-context/00--cluster.yaml rename to testing/kuttl/e2e/security-context/00-cluster.yaml diff --git a/testing/kuttl/e2e/security-context/01--security-context.yaml b/testing/kuttl/e2e/security-context/01-security-context.yaml similarity index 100% rename from testing/kuttl/e2e/security-context/01--security-context.yaml rename to testing/kuttl/e2e/security-context/01-security-context.yaml diff --git a/testing/kuttl/e2e/security-context/10--kyverno.yaml b/testing/kuttl/e2e/security-context/10-kyverno.yaml similarity index 100% rename
from testing/kuttl/e2e/security-context/10--kyverno.yaml rename to testing/kuttl/e2e/security-context/10-kyverno.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml deleted file mode 100644 index c86a544166..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/00--create-cluster.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/00-cluster.yaml -assert: -- files/00-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml deleted file mode 100644 index bbddba56c2..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/01--user-schema.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -commands: -# ensure the user schema is created for pgAdmin to use - - script: | - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=elephant, - postgres-operator.crunchydata.com/role=master' - ) - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" \ - -- psql -qAt -d elephant --command 'CREATE SCHEMA elephant AUTHORIZATION elephant' diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml deleted file mode 100644 index 0ef15853af..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/02--create-pgadmin.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/02-pgadmin.yaml -assert: -- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml deleted file mode 100644 index 6a25871f63..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/03-assert.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -- script: | - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=elephant, - postgres-operator.crunchydata.com/role=master' - ) - - NUM_USERS=$( - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ - psql -qAt -d elephant --command 'select count(*) from elephant.user' \ - ) - - if [[ ${NUM_USERS} != 1 ]]; then - echo >&2 'Expected 1 user' - echo "got ${NUM_USERS}" - exit 1 - fi diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml deleted file mode 100644 index f8aaf480fd..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/04--update-pgadmin.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/04-pgadmin.yaml -assert: -- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml deleted file mode 100644 index 4d31c5db18..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/05-assert.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# timeout: 120 -commands: -- script: | - PRIMARY=$( - kubectl get pod --namespace "${NAMESPACE}" \ - --output name --selector ' - postgres-operator.crunchydata.com/cluster=elephant, - postgres-operator.crunchydata.com/role=master' - 
) - - NUM_USERS=$( - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ - psql -qAt -d elephant --command 'select count(*) from elephant.user' \ - ) - - if [[ ${NUM_USERS} != 2 ]]; then - echo >&2 'Expected 2 user' - echo "got ${NUM_USERS}" - exit 1 - fi - - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - USER_LIST=$( - kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \ - psql -qAt -d elephant --command 'select email from elephant.user;' \ - ) - - { - contains "${USER_LIST}" "john.doe@example.com" - } || { - echo >&2 'User john.doe@example.com not found. Got:' - echo "${USER_LIST}" - exit 1 - } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md b/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md deleted file mode 100644 index 2d7688ae3b..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/README.md +++ /dev/null @@ -1,26 +0,0 @@ -# pgAdmin external database tests - -Notes: -- Due to the (random) namespace being part of the host, we cannot check the configmap using the usual assert/file pattern. -- These tests will only work with pgAdmin version v8 and higher - -## create postgrescluster and add user schema -* 00: - * create a postgrescluster with a label; - * check that the cluster has the label and that the expected user secret is created. -* 01: - * create the user schema for pgAdmin to use - - ## create pgadmin and verify connection to database -* 02: - * create a pgadmin with a selector for the existing cluster's label; - * check the correct existence of the secret, configmap, and pod. -* 03: - * check that pgAdmin only has one user - - ## add a pgadmin user and verify it in the database -* 04: - * update pgadmin with a new user; - * check that the pod is still running as expected. -* 05: - * check that pgAdmin now has two users and that the defined user is present. 
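For reference, the deleted asserts in this test all follow one shell pattern: resolve the cluster primary by its operator labels, then query Postgres through `kubectl exec`. A minimal standalone sketch of that pattern, assuming the cluster, database, and schema are all named `elephant` as in the deleted files above, with the expected count of 2 from step 05:

```shell
# Locate the primary pod of the "elephant" cluster by its operator labels.
PRIMARY=$(
  kubectl get pod --namespace "${NAMESPACE}" \
    --output name --selector '
    postgres-operator.crunchydata.com/cluster=elephant,
    postgres-operator.crunchydata.com/role=master'
)

# Count rows in the pgAdmin user table; -qAt prints quiet, unaligned, tuples-only output.
NUM_USERS=$(
  kubectl exec --namespace "${NAMESPACE}" "${PRIMARY}" -- \
    psql -qAt -d elephant --command 'select count(*) from elephant.user'
)

# Fail the assert when the count differs from the expected number of users.
if [[ "${NUM_USERS}" != 2 ]]; then
  echo >&2 "Expected 2 users, got ${NUM_USERS}"
  exit 1
fi
```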
diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml deleted file mode 100644 index 8ae250152f..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster-check.yaml +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: elephant - labels: - sometest: test1 -status: - instances: - - name: instance1 - readyReplicas: 1 - replicas: 1 - updatedReplicas: 1 ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/cluster: elephant - postgres-operator.crunchydata.com/pguser: elephant - postgres-operator.crunchydata.com/role: pguser -type: Opaque ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/cluster: elephant - postgres-operator.crunchydata.com/instance-set: instance1 - postgres-operator.crunchydata.com/role: master -status: - phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml deleted file mode 100644 index 5f8678e5e9..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/00-cluster.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PostgresCluster -metadata: - name: elephant - labels: - sometest: test1 -spec: - postgresVersion: ${KUTTL_PG_VERSION} - instances: - - name: instance1 - dataVolumeClaimSpec: { accessModes: [ReadWriteOnce], resources: { requests: { storage: 1Gi } } } diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml deleted file mode 100644 index 6457b2ca20..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin-check.yaml +++ /dev/null @@ -1,29 +0,0 @@ ---- -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin1 ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin1 -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin1 -type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml deleted file mode 100644 index f1e251b949..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/02-pgadmin.yaml +++ /dev/null @@ -1,20 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin1 -spec: - config: - configDatabaseURI: - name: elephant-pguser-elephant - key: uri - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: - - name: kuttl-test - postgresClusterSelector: - matchLabels: - sometest: test1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml deleted file mode 100644 index 3a3f459441..0000000000 --- 
a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin-check.yaml +++ /dev/null @@ -1,14 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin1 -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running diff --git a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml deleted file mode 100644 index 2c62b58b4b..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-db-uri/files/04-pgadmin.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin1 -spec: - users: - - username: "john.doe@example.com" - passwordRef: - name: john-doe-password - key: password - config: - configDatabaseURI: - name: elephant-pguser-elephant - key: uri - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: - - name: kuttl-test - postgresClusterSelector: - matchLabels: - sometest: test1 ---- -apiVersion: v1 -kind: Secret -metadata: - name: john-doe-password -type: Opaque -stringData: - password: password diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml deleted file mode 100644 index 9372467a93..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/00--pgadmin.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serviceName: pgadmin-service diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml deleted file mode 100644 index 758814cad2..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/00-assert.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-service - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - controller: true - kind: PGAdmin - name: pgadmin -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: pgadmin - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml deleted file mode 100644 index 81db248fd4..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/01--update-service.yaml +++ /dev/null @@ -1,12 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serviceName: pgadmin-service-updated diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml deleted file mode 100644 index 2303ebe9bb..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/01-assert.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-service-updated - labels: - 
postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: pgadmin - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml deleted file mode 100644 index b8cbf4eb41..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/02--remove-service.yaml +++ /dev/null @@ -1,11 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml deleted file mode 100644 index f2795c106d..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/02-errors.yaml +++ /dev/null @@ -1,16 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-service - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: pgadmin - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml deleted file mode 100644 index 88d8da6718..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/10--manual-service.yaml +++ /dev/null @@ -1,29 +0,0 @@ -# Manually create a service that should be taken over by pgAdmin -# The manual service is of type LoadBalancer -# Once taken over, the type should change to ClusterIP -apiVersion: v1 -kind: Service -metadata: - name: manual-pgadmin-service -spec: - ports: - - name: pgadmin-port - port: 5050 - protocol: TCP - selector: - postgres-operator.crunchydata.com/pgadmin: rhino - type: LoadBalancer ---- -# Create a pgAdmin that points to an existing un-owned service -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: manual-svc-pgadmin -spec: - serviceName: manual-pgadmin-service - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml deleted file mode 100644 index 95bf241b16..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/10-assert.yaml +++ /dev/null @@ -1,22 +0,0 @@ -# Check that the manually created service has the correct ownerReference -apiVersion: v1 -kind: Service -metadata: - name: manual-pgadmin-service - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - controller: true - kind: PGAdmin - name: manual-svc-pgadmin -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: manual-svc-pgadmin - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml deleted file mode 100644 index 04f211ffc7..0000000000 --- 
a/testing/kuttl/e2e/standalone-pgadmin-service/20--owned-service.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Create a pgAdmin that will create and own a service -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin-service-owner -spec: - serviceName: pgadmin-owned-service - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml deleted file mode 100644 index a6ab1653bb..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/20-assert.yaml +++ /dev/null @@ -1,21 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-owned-service - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - controller: true - kind: PGAdmin - name: pgadmin-service-owner -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml deleted file mode 100644 index f992521ce8..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/21--service-takeover-fails.yaml +++ /dev/null @@ -1,13 +0,0 @@ -# Create a second pgAdmin that attempts to steal the service -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin-service-thief -spec: - serviceName: pgadmin-owned-service - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi diff --git a/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml deleted file mode 100644 index 060d669987..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-service/21-assert.yaml +++ /dev/null @@ -1,35 +0,0 @@ -# Original service should still have owner reference -apiVersion: v1 -kind: Service -metadata: - name: pgadmin-owned-service - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner - ownerReferences: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - controller: true - kind: PGAdmin - name: pgadmin-service-owner -spec: - selector: - postgres-operator.crunchydata.com/pgadmin: pgadmin-service-owner - ports: - - port: 5050 - targetPort: 5050 - protocol: TCP - name: pgadmin-port - type: ClusterIP ---- -# An event should be created for the failure to reconcile the Service -apiVersion: v1 -involvedObject: - apiVersion: postgres-operator.crunchydata.com/v1beta1 - kind: PGAdmin - name: pgadmin-service-thief -kind: Event -message: 'Failed to reconcile Service ServiceName: pgadmin-owned-service' -reason: InvalidServiceWarning -source: - component: pgadmin-controller -type: Warning diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml deleted file mode 100644 index ee1a03ec64..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/00--create-pgadmin.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/00-pgadmin.yaml -assert: -- 
files/00-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml deleted file mode 100644 index 244533b7ee..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/01-assert.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - - [ $bob_role = 1 ] && [ $dave_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') - dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') - - $bob_is_admin && ! $dave_is_admin || exit 1 - - bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') - dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') - - [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml deleted file mode 100644 index 0ef15853af..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/02--edit-pgadmin-users.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/02-pgadmin.yaml -assert: -- files/02-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml deleted file mode 100644 index 01aff25b3b..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/03-assert.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') - - [ $bob_role = 1 ] && [ 
$dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') - dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') - jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') - - $bob_is_admin && $dave_is_admin && ! $jimi_is_admin || exit 1 - - bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') - dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') - jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') - - [ "$bob_password" = "password123" ] && [ "$dave_password" = "password456" ] && [ "$jimi_password" = "password789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml deleted file mode 100644 index f8aaf480fd..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/04--change-pgadmin-user-passwords.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/04-pgadmin.yaml -assert: -- files/04-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml deleted file mode 100644 index 1dca13a7b7..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/05-assert.yaml +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') - - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - bob_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="bob@example.com") | .isAdmin') - dave_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="dave@example.com") | .isAdmin') - jimi_is_admin=$(printf '%s\n' $users_in_secret | jq '.[] | select(.username=="jimi@example.com") | .isAdmin') - - $bob_is_admin && $dave_is_admin && ! 
$jimi_is_admin || exit 1 - - bob_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="bob@example.com") | .password') - dave_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="dave@example.com") | .password') - jimi_password=$(printf '%s\n' $users_in_secret | jq -r '.[] | select(.username=="jimi@example.com") | .password') - - [ "$bob_password" = "NEWpassword123" ] && [ "$dave_password" = "NEWpassword456" ] && [ "$jimi_password" = "NEWpassword789" ] || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml deleted file mode 100644 index a538b7dca4..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/06--delete-pgadmin-users.yaml +++ /dev/null @@ -1,6 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/06-pgadmin.yaml -assert: -- files/06-pgadmin-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml deleted file mode 100644 index 5c0e7267e6..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/07-assert.yaml +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -commands: -# When setup.py returns users in Json, the Role translation is 1 for Admin, 2 for User -- script: | - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - secret_name=$(kubectl get secret -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - users_in_pgadmin=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py get-users --json") - - bob_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="bob@example.com") | .role') - dave_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="dave@example.com") | .role') - jimi_role=$(printf '%s\n' $users_in_pgadmin | jq '.[] | select(.username=="jimi@example.com") | .role') - - [ $bob_role = 1 ] && [ $dave_role = 1 ] && [ $jimi_role = 2 ] || exit 1 - - users_in_secret=$(kubectl get "${secret_name}" -n "${NAMESPACE}" -o 'go-template={{index .data "users.json" }}' | base64 -d) - - $(printf '%s\n' $users_in_secret | jq '. 
== []') || exit 1 diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md b/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md deleted file mode 100644 index 0bbdfc2893..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# pgAdmin User Management tests - -*Note: These tests will only work with pgAdmin version v8 and higher* - -## Create pgAdmin with users - -* Start pgAdmin with a couple users -* Ensure users exist in pgAdmin with correct settings -* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings - -## Edit pgAdmin users - -* Add a user and edit an existing user -* Ensure users exist in pgAdmin with correct settings -* Ensure users exist in the `users.json` file in the pgAdmin secret with the correct settings - -## Delete pgAdmin users - -* Remove users from pgAdmin spec -* Ensure users still exist in pgAdmin with correct settings -* Ensure users have been removed from the `users.json` file in the pgAdmin secret diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml deleted file mode 100644 index f2c7f28cd1..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin-check.yaml +++ /dev/null @@ -1,34 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: bob-password-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: dave-password-secret -type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml deleted file mode 100644 index ce86d8d894..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/00-pgadmin.yaml +++ /dev/null @@ -1,40 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: [] - users: - - username: bob@example.com - role: Administrator - passwordRef: - name: bob-password-secret - key: password - - username: dave@example.com - passwordRef: - name: dave-password-secret - key: password ---- -apiVersion: v1 -kind: Secret -metadata: - name: bob-password-secret -type: Opaque -data: - # Password is "password123", base64 encoded - password: cGFzc3dvcmQxMjM= ---- -apiVersion: v1 -kind: Secret -metadata: - name: dave-password-secret -type: Opaque -data: - # Password is "password456", base64 encoded - password: cGFzc3dvcmQ0NTY= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml deleted file mode 100644 index 9a07b0d994..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin-check.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -apiVersion: v1 -kind: Pod 
-metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: bob-password-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: dave-password-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: jimi-password-secret -type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml deleted file mode 100644 index 88f75d8092..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/02-pgadmin.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: postgres-operator.crunchydata.com/v1beta1 -kind: PGAdmin -metadata: - name: pgadmin -spec: - dataVolumeClaimSpec: - accessModes: - - "ReadWriteOnce" - resources: - requests: - storage: 1Gi - serverGroups: [] - users: - - username: bob@example.com - role: Administrator - passwordRef: - name: bob-password-secret - key: password - - username: dave@example.com - role: Administrator - passwordRef: - name: dave-password-secret - key: password - - username: jimi@example.com - passwordRef: - name: jimi-password-secret - key: password ---- -apiVersion: v1 -kind: Secret -metadata: - name: bob-password-secret -type: Opaque -data: - # Password is "password123", base64 encoded - password: cGFzc3dvcmQxMjM= ---- -apiVersion: v1 -kind: Secret -metadata: - name: dave-password-secret -type: Opaque -data: - # Password is "password456", base64 encoded - password: cGFzc3dvcmQ0NTY= ---- -apiVersion: v1 -kind: Secret -metadata: - name: jimi-password-secret -type: Opaque -data: - # Password is "password789", base64 encoded - password: cGFzc3dvcmQ3ODk= diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml deleted file mode 100644 index 9a07b0d994..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin-check.yaml +++ /dev/null @@ -1,40 +0,0 @@ ---- -apiVersion: v1 -kind: Pod -metadata: - labels: - postgres-operator.crunchydata.com/data: pgadmin - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -status: - containerStatuses: - - name: pgadmin - ready: true - started: true - phase: Running ---- -apiVersion: v1 -kind: Secret -metadata: - labels: - postgres-operator.crunchydata.com/role: pgadmin - postgres-operator.crunchydata.com/pgadmin: pgadmin -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: bob-password-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: dave-password-secret -type: Opaque ---- -apiVersion: v1 -kind: Secret -metadata: - name: jimi-password-secret -type: Opaque diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml deleted file mode 100644 index 32b0081f92..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml +++ /dev/null @@ -1,54 +0,0 @@ -apiVersion: 
diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml
deleted file mode 100644
index 32b0081f92..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/04-pgadmin.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PGAdmin
-metadata:
-  name: pgadmin
-spec:
-  dataVolumeClaimSpec:
-    accessModes:
-    - "ReadWriteOnce"
-    resources:
-      requests:
-        storage: 1Gi
-  serverGroups: []
-  users:
-  - username: bob@example.com
-    role: Administrator
-    passwordRef:
-      name: bob-password-secret
-      key: password
-  - username: dave@example.com
-    role: Administrator
-    passwordRef:
-      name: dave-password-secret
-      key: password
-  - username: jimi@example.com
-    passwordRef:
-      name: jimi-password-secret
-      key: password
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: bob-password-secret
-type: Opaque
-data:
-  # Password is "NEWpassword123", base64 encoded
-  password: TkVXcGFzc3dvcmQxMjM=
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: dave-password-secret
-type: Opaque
-data:
-  # Password is "NEWpassword456", base64 encoded
-  password: TkVXcGFzc3dvcmQ0NTY=
----
-apiVersion: v1
-kind: Secret
-metadata:
-  name: jimi-password-secret
-type: Opaque
-data:
-  # Password is "NEWpassword789", base64 encoded
-  password: TkVXcGFzc3dvcmQ3ODk=
diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml
deleted file mode 100644
index 04481fb4d1..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin-check.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-apiVersion: v1
-kind: Pod
-metadata:
-  labels:
-    postgres-operator.crunchydata.com/data: pgadmin
-    postgres-operator.crunchydata.com/role: pgadmin
-    postgres-operator.crunchydata.com/pgadmin: pgadmin
-status:
-  containerStatuses:
-  - name: pgadmin
-    ready: true
-    started: true
-  phase: Running
----
-apiVersion: v1
-kind: Secret
-metadata:
-  labels:
-    postgres-operator.crunchydata.com/role: pgadmin
-    postgres-operator.crunchydata.com/pgadmin: pgadmin
-type: Opaque
diff --git a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml
deleted file mode 100644
index 0513edf050..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin-user-management/files/06-pgadmin.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: postgres-operator.crunchydata.com/v1beta1
-kind: PGAdmin
-metadata:
-  name: pgadmin
-spec:
-  dataVolumeClaimSpec:
-    accessModes:
-    - "ReadWriteOnce"
-    resources:
-      requests:
-        storage: 1Gi
-  serverGroups: []
-  users: []
diff --git a/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml
deleted file mode 100644
index ee1a03ec64..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/00--create-pgadmin.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-apply:
-- files/00-pgadmin.yaml
-assert:
-- files/00-pgadmin-check.yaml
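Each TestStep like the one above applies the listed manifests and waits until the assert file is satisfied. To reproduce one of these runs locally, something along these lines should work, assuming the kuttl kubectl plugin is installed and the suite is driven from the repository's kuttl e2e directory (invocation details are an assumption, not taken from this diff):

    # Hypothetical local run of just this test from the kuttl e2e suite.
    kubectl kuttl test testing/kuttl/e2e --test standalone-pgadmin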
diff --git a/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml
deleted file mode 100644
index 6b7c8c8794..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/01-assert.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-commands:
-- script: |
-    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
-
-    pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
-
-    clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json")
-
-    clusters_expected="\"Servers\": {}"
-    {
-      contains "${clusters_actual}" "${clusters_expected}"
-    } || {
-      echo "Wrong servers dumped: got ${clusters_actual}"
-      exit 1
-    }
diff --git a/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml
deleted file mode 100644
index bee91ce0a4..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/02--create-cluster.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-apply:
-- files/02-cluster.yaml
-- files/02-pgadmin.yaml
-assert:
-- files/02-cluster-check.yaml
diff --git a/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml
deleted file mode 100644
index 169a8261eb..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/03-assert.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-# Check the configmap is updated;
-# Check the file is updated on the pod;
-# Check the server dump is accurate.
-# Because we have to wait for the configmap reload, make sure we have enough time.
-timeout: 120
-commands:
-- script: |
-    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
-    diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; }
-
-    data_expected='"pgadmin-shared-clusters.json": "{\n  \"Servers\": {\n    \"1\": {\n      \"Group\": \"groupOne\",\n      \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n      \"MaintenanceDB\": \"postgres\",\n      \"Name\": \"pgadmin1\",\n      \"Port\": 5432,\n      \"SSLMode\": \"prefer\",\n      \"Shared\": true,\n      \"Username\": \"pgadmin1\"\n    }\n  }\n}\n"'
-
-    data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data)
-
-    {
-      contains "${data_actual}" "${data_expected}"
-    } || {
-      echo "Wrong configmap: got ${data_actual}"
-      exit 1
-    }
-
-    pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
-
-    config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json')
-    config_expected='"Servers": {
-      "1": {
-        "Group": "groupOne",
-        "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-        "MaintenanceDB": "postgres",
-        "Name": "pgadmin1",
-        "Port": 5432,
-        "SSLMode": "prefer",
-        "Shared": true,
-        "Username": "pgadmin1"
-      }
-    }'
-    {
-      contains "${config_updated}" "${config_expected}"
-    } || {
-      echo "Wrong file mounted: got ${config_updated}"
-      echo "Wrong file mounted: expected ${config_expected}"
-      sleep 10
-      exit 1
-    }
-
-    clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json")
-
-    clusters_expected='
-    {
-      "Servers": {
-        "1": {
-          "Name": "pgadmin1",
-          "Group": "groupOne",
-          "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-          "Port": 5432,
-          "MaintenanceDB": "postgres",
-          "Username": "pgadmin1",
-          "Shared": true,
-          "TunnelPort": "22",
-          "KerberosAuthentication": false,
-          "ConnectionParameters": {
-            "sslmode": "prefer"
-          }
-        }
-      }
-    }'
-    {
-      contains "${clusters_actual}" "${clusters_expected}"
-    } || {
-      echo "Wrong servers dumped: got ${clusters_actual}"
-      echo "Wrong servers dumped: expected ${clusters_expected}"
-      diff_comp "${clusters_actual}" "${clusters_expected}"
-      exit 1
-    }
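The `contains` and `diff_comp` helpers used throughout these asserts are one-liners around `bash -c`: the lone `-` becomes `$0` of the inner shell, so the caller's arguments arrive as `$1` and `$2` inside the single-quoted script. A standalone sketch of the same idiom:

    #!/usr/bin/env bash
    contains()  { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
    diff_comp() { bash -ceu 'diff <(echo "$1") <(echo "$2")' - "$@"; }

    # contains succeeds when the second string occurs anywhere in the first.
    contains 'Servers: {}' 'Servers' && echo 'substring found'

    # diff_comp prints a line-by-line diff and fails when the strings differ.
    diff_comp 'line one' 'line two' || echo 'strings differ'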
diff --git a/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml
deleted file mode 100644
index 5701678501..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/04--create-cluster.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-apply:
-- files/04-cluster.yaml
-assert:
-- files/04-cluster-check.yaml
diff --git a/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml
deleted file mode 100644
index 7fe5b69dc2..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/05-assert.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-# Check the configmap is updated;
-# Check the file is updated on the pod;
-# Check the server dump is accurate.
-# Because we have to wait for the configmap reload, make sure we have enough time.
-timeout: 120
-commands:
-- script: |
-    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
-    diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; }
-
-    data_expected='"pgadmin-shared-clusters.json": "{\n  \"Servers\": {\n    \"1\": {\n      \"Group\": \"groupOne\",\n      \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n      \"MaintenanceDB\": \"postgres\",\n      \"Name\": \"pgadmin1\",\n      \"Port\": 5432,\n      \"SSLMode\": \"prefer\",\n      \"Shared\": true,\n      \"Username\": \"pgadmin1\"\n    },\n    \"2\": {\n      \"Group\": \"groupOne\",\n      \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n      \"MaintenanceDB\": \"postgres\",\n      \"Name\": \"pgadmin2\",\n      \"Port\": 5432,\n      \"SSLMode\": \"prefer\",\n      \"Shared\": true,\n      \"Username\": \"pgadmin2\"\n    }\n  }\n}\n"'
-
-    data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data)
-
-    {
-      contains "${data_actual}" "${data_expected}"
-    } || {
-      echo "Wrong configmap: got ${data_actual}"
-      diff_comp "${data_actual}" "${data_expected}"
-      exit 1
-    }
-
-    pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
-
-    config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json')
-    config_expected='"Servers": {
-      "1": {
-        "Group": "groupOne",
-        "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-        "MaintenanceDB": "postgres",
-        "Name": "pgadmin1",
-        "Port": 5432,
-        "SSLMode": "prefer",
-        "Shared": true,
-        "Username": "pgadmin1"
-      },
-      "2": {
-        "Group": "groupOne",
-        "Host": "pgadmin2-primary.'${NAMESPACE}.svc'",
-        "MaintenanceDB": "postgres",
-        "Name": "pgadmin2",
-        "Port": 5432,
-        "SSLMode": "prefer",
-        "Shared": true,
-        "Username": "pgadmin2"
-      }
-    }'
-    {
-      contains "${config_updated}" "${config_expected}"
-    } || {
-      echo "Wrong file mounted: got ${config_updated}"
-      echo "Wrong file mounted: expected ${config_expected}"
-      diff_comp "${config_updated}" "${config_expected}"
-      sleep 10
-      exit 1
-    }
-
-    clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json")
-
-    clusters_expected='
-    {
-      "Servers": {
-        "1": {
-          "Name": "pgadmin1",
-          "Group": "groupOne",
-          "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-          "Port": 5432,
-          "MaintenanceDB": "postgres",
-          "Username": "pgadmin1",
-          "Shared": true,
-          "TunnelPort": "22",
-          "KerberosAuthentication": false,
-          "ConnectionParameters": {
-            "sslmode": "prefer"
-          }
-        },
-        "2": {
-          "Name": "pgadmin2",
-          "Group": "groupOne",
-          "Host": "pgadmin2-primary.'${NAMESPACE}.svc'",
-          "Port": 5432,
-          "MaintenanceDB": "postgres",
-          "Username": "pgadmin2",
-          "Shared": true,
-          "TunnelPort": "22",
-          "KerberosAuthentication": false,
-          "ConnectionParameters": {
-            "sslmode": "prefer"
-          }
-        }
-      }
-    }'
-    {
-      contains "${clusters_actual}" "${clusters_expected}"
    } || {
-      echo "Wrong servers dumped: got ${clusters_actual}"
-      echo "Wrong servers dumped: expected ${clusters_expected}"
-      diff_comp "${clusters_actual}" "${clusters_expected}"
-      exit 1
-    }
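The `data_expected` comparisons above match the ConfigMap's JSON-in-a-string verbatim, escapes and all. When debugging a mismatch, it can be easier to unwrap the embedded document. A sketch using the same label selector and data key as the asserts (requires a local `jq`):

    # Pretty-print the server list embedded in the pgAdmin ConfigMap.
    kubectl get cm -n "${NAMESPACE}" \
      -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o json \
      | jq -r '.items[0].data["pgadmin-shared-clusters.json"]' | jq .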
"Name": "pgadmin2", - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin2", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml deleted file mode 100644 index 86b5f8bf04..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin/06--create-cluster.yaml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -apply: -- files/06-cluster.yaml -- files/06-pgadmin.yaml -assert: -- files/06-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml deleted file mode 100644 index 323237cad4..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin/07-assert.yaml +++ /dev/null @@ -1,126 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. -timeout: 120 -commands: -- script: | - contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; } - diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; } - - data_expected='"pgadmin-shared-clusters.json": "{\n \"Servers\": {\n \"1\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin1\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin1\"\n },\n \"2\": {\n \"Group\": \"groupOne\",\n \"Host\": \"pgadmin2-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin2\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin2\"\n },\n \"3\": {\n \"Group\": \"groupTwo\",\n \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n \"MaintenanceDB\": \"postgres\",\n \"Name\": \"pgadmin3\",\n \"Port\": 5432,\n \"SSLMode\": \"prefer\",\n \"Shared\": true,\n \"Username\": \"pgadmin3\"\n }\n }\n}\n"' - - data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data) - - { - contains "${data_actual}" "${data_expected}" - } || { - echo "Wrong configmap: got ${data_actual}" - diff_comp "${data_actual}" "${data_expected}" - exit 1 - } - - pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name) - - config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json') - config_expected='"Servers": { - "1": { - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin1", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin1" - }, - "2": { - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin2", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin2" - }, - "3": { - 
"Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "MaintenanceDB": "postgres", - "Name": "pgadmin3", - "Port": 5432, - "SSLMode": "prefer", - "Shared": true, - "Username": "pgadmin3" - } - }' - { - contains "${config_updated}" "${config_expected}" - } || { - echo "Wrong file mounted: got ${config_updated}" - echo "Wrong file mounted: expected ${config_expected}" - diff_comp "${config_updated}" "${config_expected}" - sleep 10 - exit 1 - } - - clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json") - - clusters_expected=' - { - "Servers": { - "1": { - "Name": "pgadmin1", - "Group": "groupOne", - "Host": "pgadmin1-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin1", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "2": { - "Name": "pgadmin2", - "Group": "groupOne", - "Host": "pgadmin2-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin2", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - }, - "3": { - "Name": "pgadmin3", - "Group": "groupTwo", - "Host": "pgadmin3-primary.'${NAMESPACE}.svc'", - "Port": 5432, - "MaintenanceDB": "postgres", - "Username": "pgadmin3", - "Shared": true, - "TunnelPort": "22", - "KerberosAuthentication": false, - "ConnectionParameters": { - "sslmode": "prefer" - } - } - } - }' - { - contains "${clusters_actual}" "${clusters_expected}" - } || { - echo "Wrong servers dumped: got ${clusters_actual}" - echo "Wrong servers dumped: expected ${clusters_expected}" - diff_comp "${clusters_actual}" "${clusters_expected}" - exit 1 - } diff --git a/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml b/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml deleted file mode 100644 index bc11ea62f4..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin/08--delete-cluster.yaml +++ /dev/null @@ -1,8 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestStep -delete: - - apiVersion: postgres-operator.crunchydata.com/v1beta1 - kind: PostgresCluster - name: pgadmin2 -error: -- files/04-cluster-check.yaml diff --git a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml deleted file mode 100644 index eca5581cb7..0000000000 --- a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml +++ /dev/null @@ -1,102 +0,0 @@ -apiVersion: kuttl.dev/v1beta1 -kind: TestAssert -# Check the configmap is updated; -# Check the file is updated on the pod; -# Check the server dump is accurate. -# Because we have to wait for the configmap reload, make sure we have enough time. 
diff --git a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml b/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml
deleted file mode 100644
index eca5581cb7..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/09-assert.yaml
+++ /dev/null
@@ -1,102 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestAssert
-# Check the configmap is updated;
-# Check the file is updated on the pod;
-# Check the server dump is accurate.
-# Because we have to wait for the configmap reload, make sure we have enough time.
-timeout: 120
-commands:
-- script: |
-    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
-    diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; }
-
-    data_expected='"pgadmin-shared-clusters.json": "{\n  \"Servers\": {\n    \"1\": {\n      \"Group\": \"groupOne\",\n      \"Host\": \"pgadmin1-primary.'${NAMESPACE}.svc'\",\n      \"MaintenanceDB\": \"postgres\",\n      \"Name\": \"pgadmin1\",\n      \"Port\": 5432,\n      \"SSLMode\": \"prefer\",\n      \"Shared\": true,\n      \"Username\": \"pgadmin1\"\n    },\n    \"2\": {\n      \"Group\": \"groupTwo\",\n      \"Host\": \"pgadmin3-primary.'${NAMESPACE}.svc'\",\n      \"MaintenanceDB\": \"postgres\",\n      \"Name\": \"pgadmin3\",\n      \"Port\": 5432,\n      \"SSLMode\": \"prefer\",\n      \"Shared\": true,\n      \"Username\": \"pgadmin3\"\n    }\n  }\n}\n"'
-
-    data_actual=$(kubectl get cm -l postgres-operator.crunchydata.com/pgadmin=pgadmin -n "${NAMESPACE}" -o json | jq .items[0].data)
-
-    {
-      contains "${data_actual}" "${data_expected}"
-    } || {
-      echo "Wrong configmap: got ${data_actual}"
-      diff_comp "${data_actual}" "${data_expected}"
-      exit 1
-    }
-
-    pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
-
-    config_updated=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c 'cat /etc/pgadmin/conf.d/~postgres-operator/pgadmin-shared-clusters.json')
-    config_expected='"Servers": {
-      "1": {
-        "Group": "groupOne",
-        "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-        "MaintenanceDB": "postgres",
-        "Name": "pgadmin1",
-        "Port": 5432,
-        "SSLMode": "prefer",
-        "Shared": true,
-        "Username": "pgadmin1"
-      },
-      "2": {
-        "Group": "groupTwo",
-        "Host": "pgadmin3-primary.'${NAMESPACE}.svc'",
-        "MaintenanceDB": "postgres",
-        "Name": "pgadmin3",
-        "Port": 5432,
-        "SSLMode": "prefer",
-        "Shared": true,
-        "Username": "pgadmin3"
-      }
-    }'
-    {
-      contains "${config_updated}" "${config_expected}"
-    } || {
-      echo "Wrong file mounted: got ${config_updated}"
-      echo "Wrong file mounted: expected ${config_expected}"
-      diff_comp "${config_updated}" "${config_expected}"
-      sleep 10
-      exit 1
-    }
-
-    clusters_actual=$(kubectl exec -n "${NAMESPACE}" "${pod_name}" -- bash -c "python3 /usr/local/lib/python3.11/site-packages/pgadmin4/setup.py dump-servers /tmp/dumped.json --user admin@pgadmin.${NAMESPACE}.svc && cat /tmp/dumped.json")
-
-    clusters_expected='
-    {
-      "Servers": {
-        "1": {
-          "Name": "pgadmin1",
-          "Group": "groupOne",
-          "Host": "pgadmin1-primary.'${NAMESPACE}.svc'",
-          "Port": 5432,
-          "MaintenanceDB": "postgres",
-          "Username": "pgadmin1",
-          "Shared": true,
-          "TunnelPort": "22",
-          "KerberosAuthentication": false,
-          "ConnectionParameters": {
-            "sslmode": "prefer"
-          }
-        },
-        "2": {
-          "Name": "pgadmin3",
-          "Group": "groupTwo",
-          "Host": "pgadmin3-primary.'${NAMESPACE}.svc'",
-          "Port": 5432,
-          "MaintenanceDB": "postgres",
-          "Username": "pgadmin3",
-          "Shared": true,
-          "TunnelPort": "22",
-          "KerberosAuthentication": false,
-          "ConnectionParameters": {
-            "sslmode": "prefer"
-          }
-        }
-      }
-    }'
-    {
-      contains "${clusters_actual}" "${clusters_expected}"
-    } || {
-      echo "Wrong servers dumped: got ${clusters_actual}"
-      echo "Wrong servers dumped: expected ${clusters_expected}"
-      diff_comp "${clusters_actual}" "${clusters_expected}"
-      exit 1
-    }
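After the deletion, the assert above expects the dump to renumber the remaining servers as "1" and "2". A terser spot check of the same dump, reusing the /tmp/dumped.json that a prior assert's setup.py run left in the pod (that file's presence is an assumption, and jq runs locally):

    # Count the servers pgAdmin reports after the PostgresCluster deletion; expect 2.
    pod_name=$(kubectl get pod -n "${NAMESPACE}" -l postgres-operator.crunchydata.com/pgadmin=pgadmin -o name)
    kubectl exec -n "${NAMESPACE}" "${pod_name}" -- cat /tmp/dumped.json | jq '.Servers | length'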
diff --git a/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml b/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml
deleted file mode 100644
index 118b8d06ef..0000000000
--- a/testing/kuttl/e2e/standalone-pgadmin/10-invalid-pgadmin.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
-apiVersion: kuttl.dev/v1beta1
-kind: TestStep
-# Check that invalid spec cannot be applied.
-commands:
-- script: |
-    contains() { bash -ceu '[[ "$1" == *"$2"* ]]' - "$@"; }
-    diff_comp() { bash -ceu 'diff <(echo "$1" ) <(echo "$2")' - "$@"; }
-
-    data_expected='"pgadmin2" is invalid: spec.serverGroups[0]: Invalid value: "object": exactly one of "postgresClusterName" or "postgresClusterSelector" is required'
-
-    data_actual=$(kubectl apply -f - 2>&1 <