mirror of
https://github.com/benbjohnson/litestream.git
synced 2026-01-25 05:06:30 +00:00
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
282
.github/workflows/manual-integration-tests.yml
vendored
282
.github/workflows/manual-integration-tests.yml
vendored
@@ -41,6 +41,16 @@ on:
|
|||||||
required: false
|
required: false
|
||||||
default: false
|
default: false
|
||||||
type: boolean
|
type: boolean
|
||||||
|
test_b2:
|
||||||
|
description: 'Run Backblaze B2 integration tests'
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
test_multipart:
|
||||||
|
description: 'Run multipart upload stress tests (5MB-50MB files)'
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
setup:
|
setup:
|
||||||
@@ -73,7 +83,9 @@ jobs:
|
|||||||
[ "${{ github.event.inputs.test_gcs }}" != "true" ] && \
|
[ "${{ github.event.inputs.test_gcs }}" != "true" ] && \
|
||||||
[ "${{ github.event.inputs.test_abs }}" != "true" ] && \
|
[ "${{ github.event.inputs.test_abs }}" != "true" ] && \
|
||||||
[ "${{ github.event.inputs.test_tigris }}" != "true" ] && \
|
[ "${{ github.event.inputs.test_tigris }}" != "true" ] && \
|
||||||
[ "${{ github.event.inputs.test_r2 }}" != "true" ]; then
|
[ "${{ github.event.inputs.test_r2 }}" != "true" ] && \
|
||||||
|
[ "${{ github.event.inputs.test_b2 }}" != "true" ] && \
|
||||||
|
[ "${{ github.event.inputs.test_multipart }}" != "true" ]; then
|
||||||
echo "::error::At least one test type must be selected"
|
echo "::error::At least one test type must be selected"
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
@@ -88,6 +100,8 @@ jobs:
|
|||||||
echo "- Azure Blob Storage: ${{ github.event.inputs.test_abs }}" >> $GITHUB_STEP_SUMMARY
|
echo "- Azure Blob Storage: ${{ github.event.inputs.test_abs }}" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "- Fly.io Tigris: ${{ github.event.inputs.test_tigris }}" >> $GITHUB_STEP_SUMMARY
|
echo "- Fly.io Tigris: ${{ github.event.inputs.test_tigris }}" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "- Cloudflare R2: ${{ github.event.inputs.test_r2 }}" >> $GITHUB_STEP_SUMMARY
|
echo "- Cloudflare R2: ${{ github.event.inputs.test_r2 }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- Backblaze B2: ${{ github.event.inputs.test_b2 }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "- Multipart Stress Tests: ${{ github.event.inputs.test_multipart }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
s3-integration:
|
s3-integration:
|
||||||
name: S3 Integration Tests (AWS)
|
name: S3 Integration Tests (AWS)
|
||||||
@@ -98,19 +112,37 @@ jobs:
|
|||||||
group: integration-test-s3-manual
|
group: integration-test-s3-manual
|
||||||
cancel-in-progress: false
|
cancel-in-progress: false
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::S3 integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
ref: ${{ needs.setup.outputs.ref }}
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
- run: go env
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
- run: go install ./cmd/litestream
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
- run: go test -v ./replica_client_test.go -integration -replica-clients=s3
|
- name: Run S3 integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=s3
|
||||||
env:
|
env:
|
||||||
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
|
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
|
||||||
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
|
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
|
||||||
@@ -141,19 +173,37 @@ jobs:
|
|||||||
group: integration-test-tigris-manual
|
group: integration-test-tigris-manual
|
||||||
cancel-in-progress: false
|
cancel-in-progress: false
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_TIGRIS_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::Tigris integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
ref: ${{ needs.setup.outputs.ref }}
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
- run: go env
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
- run: go install ./cmd/litestream
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
- run: go test -v ./replica_client_test.go -integration -replica-clients=tigris
|
- name: Run Tigris integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=tigris
|
||||||
env:
|
env:
|
||||||
LITESTREAM_TIGRIS_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_TIGRIS_ACCESS_KEY_ID }}
|
LITESTREAM_TIGRIS_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_TIGRIS_ACCESS_KEY_ID }}
|
||||||
LITESTREAM_TIGRIS_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_TIGRIS_SECRET_ACCESS_KEY }}
|
LITESTREAM_TIGRIS_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_TIGRIS_SECRET_ACCESS_KEY }}
|
||||||
@@ -182,19 +232,37 @@ jobs:
|
|||||||
group: integration-test-r2-manual
|
group: integration-test-r2-manual
|
||||||
cancel-in-progress: false
|
cancel-in-progress: false
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_R2_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::R2 integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
ref: ${{ needs.setup.outputs.ref }}
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
- run: go env
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
- run: go install ./cmd/litestream
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
- run: go test -v ./replica_client_test.go -integration -replica-clients=r2
|
- name: Run R2 integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=r2
|
||||||
env:
|
env:
|
||||||
LITESTREAM_R2_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_R2_ACCESS_KEY_ID }}
|
LITESTREAM_R2_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_R2_ACCESS_KEY_ID }}
|
||||||
LITESTREAM_R2_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_R2_SECRET_ACCESS_KEY }}
|
LITESTREAM_R2_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_R2_SECRET_ACCESS_KEY }}
|
||||||
@@ -216,6 +284,131 @@ jobs:
|
|||||||
*.log
|
*.log
|
||||||
test-results/
|
test-results/
|
||||||
|
|
||||||
|
b2-integration:
|
||||||
|
name: Backblaze B2 Integration Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup
|
||||||
|
if: github.event.inputs.test_b2 == 'true'
|
||||||
|
concurrency:
|
||||||
|
group: integration-test-b2-manual
|
||||||
|
cancel-in-progress: false
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_B2_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::B2 integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
|
- name: Run B2 integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=b2
|
||||||
|
env:
|
||||||
|
LITESTREAM_B2_KEY_ID: ${{ secrets.LITESTREAM_B2_KEY_ID }}
|
||||||
|
LITESTREAM_B2_APPLICATION_KEY: ${{ secrets.LITESTREAM_B2_APPLICATION_KEY }}
|
||||||
|
LITESTREAM_B2_ENDPOINT: ${{ secrets.LITESTREAM_B2_ENDPOINT }}
|
||||||
|
LITESTREAM_B2_BUCKET: ${{ secrets.LITESTREAM_B2_BUCKET }}
|
||||||
|
|
||||||
|
- name: Create test results directory
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
mkdir -p test-results
|
||||||
|
echo "Test completed at $(date)" > test-results/b2-test.log
|
||||||
|
|
||||||
|
- name: Upload test results
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: b2-test-results
|
||||||
|
path: |
|
||||||
|
*.log
|
||||||
|
test-results/
|
||||||
|
|
||||||
|
multipart-stress:
|
||||||
|
name: Multipart Upload Stress Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs: setup
|
||||||
|
if: github.event.inputs.test_multipart == 'true'
|
||||||
|
concurrency:
|
||||||
|
group: integration-test-multipart-manual
|
||||||
|
cancel-in-progress: false
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::Multipart stress tests skipped - S3 credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
|
- name: Run multipart threshold tests (AWS S3)
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: |
|
||||||
|
go test -v ./replica_client_test.go -integration -replica-clients=s3 \
|
||||||
|
-run "TestReplicaClient_S3_Multipart|TestReplicaClient_S3_Concurrency" \
|
||||||
|
-timeout 30m
|
||||||
|
env:
|
||||||
|
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
|
||||||
|
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
|
||||||
|
LITESTREAM_S3_REGION: us-east-1
|
||||||
|
LITESTREAM_S3_BUCKET: integration.litestream.io
|
||||||
|
|
||||||
|
- name: Create test results directory
|
||||||
|
if: always()
|
||||||
|
run: |
|
||||||
|
mkdir -p test-results
|
||||||
|
echo "Test completed at $(date)" > test-results/multipart-test.log
|
||||||
|
|
||||||
|
- name: Upload test results
|
||||||
|
if: always()
|
||||||
|
uses: actions/upload-artifact@v4
|
||||||
|
with:
|
||||||
|
name: multipart-test-results
|
||||||
|
path: |
|
||||||
|
*.log
|
||||||
|
test-results/
|
||||||
|
|
||||||
gcs-integration:
|
gcs-integration:
|
||||||
name: Google Cloud Storage Integration Tests
|
name: Google Cloud Storage Integration Tests
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
@@ -225,25 +418,44 @@ jobs:
|
|||||||
group: integration-test-gcp-manual
|
group: integration-test-gcp-manual
|
||||||
cancel-in-progress: false
|
cancel-in-progress: false
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }}" ]; then
|
||||||
|
echo "::notice title=Skipped::GCS integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- name: Extract GCP credentials
|
- name: Extract GCP credentials
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
|
run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
env:
|
||||||
GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
|
GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
ref: ${{ needs.setup.outputs.ref }}
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
- run: go env
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
- run: go install ./cmd/litestream
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
- run: go test -v ./replica_client_test.go -integration -replica-clients=gs
|
- name: Run GCS integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=gs
|
||||||
env:
|
env:
|
||||||
GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
|
GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
|
||||||
LITESTREAM_GS_BUCKET: litestream-github-workflows
|
LITESTREAM_GS_BUCKET: litestream-github-workflows
|
||||||
@@ -272,19 +484,37 @@ jobs:
|
|||||||
group: integration-test-abs-manual
|
group: integration-test-abs-manual
|
||||||
cancel-in-progress: false
|
cancel-in-progress: false
|
||||||
steps:
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}" ]; then
|
||||||
|
echo "::notice title=Skipped::Azure Blob Storage integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
- uses: actions/checkout@v4
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
ref: ${{ needs.setup.outputs.ref }}
|
ref: ${{ needs.setup.outputs.ref }}
|
||||||
|
|
||||||
- uses: actions/setup-go@v5
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
with:
|
with:
|
||||||
go-version-file: "go.mod"
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
- run: go env
|
- name: Show Go environment
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go env
|
||||||
|
|
||||||
- run: go install ./cmd/litestream
|
- name: Install litestream
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go install ./cmd/litestream
|
||||||
|
|
||||||
- run: go test -v ./replica_client_test.go -integration -replica-clients=abs
|
- name: Run Azure Blob Storage integration tests
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=abs
|
||||||
env:
|
env:
|
||||||
LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
|
LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
|
||||||
LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
|
LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
|
||||||
@@ -308,7 +538,7 @@ jobs:
|
|||||||
summary:
|
summary:
|
||||||
name: Test Summary
|
name: Test Summary
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
needs: [setup, s3-integration, gcs-integration, abs-integration, tigris-integration, r2-integration]
|
needs: [setup, s3-integration, gcs-integration, abs-integration, tigris-integration, r2-integration, b2-integration, multipart-stress]
|
||||||
if: always()
|
if: always()
|
||||||
steps:
|
steps:
|
||||||
- name: Download all artifacts
|
- name: Download all artifacts
|
||||||
@@ -364,6 +594,22 @@ jobs:
|
|||||||
echo "⏭️ **Cloudflare R2:** Skipped" >> $GITHUB_STEP_SUMMARY
|
echo "⏭️ **Cloudflare R2:** Skipped" >> $GITHUB_STEP_SUMMARY
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.b2-integration.result }}" == "success" ]; then
|
||||||
|
echo "✅ **Backblaze B2:** Passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.b2-integration.result }}" == "failure" ]; then
|
||||||
|
echo "❌ **Backblaze B2:** Failed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.b2-integration.result }}" == "skipped" ]; then
|
||||||
|
echo "⏭️ **Backblaze B2:** Skipped" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.multipart-stress.result }}" == "success" ]; then
|
||||||
|
echo "✅ **Multipart Stress Tests:** Passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.multipart-stress.result }}" == "failure" ]; then
|
||||||
|
echo "❌ **Multipart Stress Tests:** Failed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.multipart-stress.result }}" == "skipped" ]; then
|
||||||
|
echo "⏭️ **Multipart Stress Tests:** Skipped" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
echo "" >> $GITHUB_STEP_SUMMARY
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "---" >> $GITHUB_STEP_SUMMARY
|
echo "---" >> $GITHUB_STEP_SUMMARY
|
||||||
echo "**Triggered by:** @${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
|
echo "**Triggered by:** @${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
|
||||||
@@ -388,6 +634,8 @@ jobs:
|
|||||||
if ('${{ needs.abs-integration.result }}' === 'failure') failedJobs.push('Azure');
|
if ('${{ needs.abs-integration.result }}' === 'failure') failedJobs.push('Azure');
|
||||||
if ('${{ needs.tigris-integration.result }}' === 'failure') failedJobs.push('Tigris');
|
if ('${{ needs.tigris-integration.result }}' === 'failure') failedJobs.push('Tigris');
|
||||||
if ('${{ needs.r2-integration.result }}' === 'failure') failedJobs.push('R2');
|
if ('${{ needs.r2-integration.result }}' === 'failure') failedJobs.push('R2');
|
||||||
|
if ('${{ needs.b2-integration.result }}' === 'failure') failedJobs.push('B2');
|
||||||
|
if ('${{ needs.multipart-stress.result }}' === 'failure') failedJobs.push('Multipart');
|
||||||
|
|
||||||
if (failedJobs.length > 0) {
|
if (failedJobs.length > 0) {
|
||||||
statusEmoji = '❌';
|
statusEmoji = '❌';
|
||||||
|
|||||||
454
.github/workflows/pre-release-checklist.yml
vendored
Normal file
454
.github/workflows/pre-release-checklist.yml
vendored
Normal file
@@ -0,0 +1,454 @@
|
|||||||
|
name: Pre-Release Checklist
|
||||||
|
|
||||||
|
# Advisory workflow to verify release readiness
|
||||||
|
# This workflow reports results but does NOT block releases
|
||||||
|
# Run manually before tagging a new release
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
issues: write
|
||||||
|
pull-requests: write
|
||||||
|
|
||||||
|
on:
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
version:
|
||||||
|
description: 'Version being released (e.g., v0.5.6)'
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
create_issue:
|
||||||
|
description: 'Create a GitHub issue with results'
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
test_cloud_providers:
|
||||||
|
description: 'Run cloud provider integration tests (S3, GCS, ABS, R2)'
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
test_multipart:
|
||||||
|
description: 'Run multipart upload stress tests'
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
type: boolean
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
unit-tests:
|
||||||
|
name: Unit Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run unit tests
|
||||||
|
id: test
|
||||||
|
run: go test -v -race ./...
|
||||||
|
|
||||||
|
build-verification:
|
||||||
|
name: Build Verification
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.build.outcome }}
|
||||||
|
steps:
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Build main binary
|
||||||
|
id: build
|
||||||
|
run: |
|
||||||
|
go build -o bin/litestream ./cmd/litestream
|
||||||
|
./bin/litestream version
|
||||||
|
|
||||||
|
- name: Verify binary runs
|
||||||
|
run: |
|
||||||
|
./bin/litestream --help
|
||||||
|
./bin/litestream databases --help
|
||||||
|
./bin/litestream replicate --help
|
||||||
|
./bin/litestream restore --help
|
||||||
|
|
||||||
|
s3-integration:
|
||||||
|
name: S3 Integration (AWS)
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event.inputs.test_cloud_providers == 'true'
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::S3 integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run S3 integration tests
|
||||||
|
id: test
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=s3
|
||||||
|
env:
|
||||||
|
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
|
||||||
|
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
|
||||||
|
LITESTREAM_S3_REGION: us-east-1
|
||||||
|
LITESTREAM_S3_BUCKET: integration.litestream.io
|
||||||
|
|
||||||
|
gcs-integration:
|
||||||
|
name: GCS Integration
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event.inputs.test_cloud_providers == 'true'
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.GOOGLE_APPLICATION_CREDENTIALS }}" ]; then
|
||||||
|
echo "::notice title=Skipped::GCS integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- name: Extract GCP credentials
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
run: 'echo "$GOOGLE_APPLICATION_CREDENTIALS" > /opt/gcp.json'
|
||||||
|
shell: bash
|
||||||
|
env:
|
||||||
|
GOOGLE_APPLICATION_CREDENTIALS: ${{secrets.GOOGLE_APPLICATION_CREDENTIALS}}
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run GCS integration tests
|
||||||
|
id: test
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=gs
|
||||||
|
env:
|
||||||
|
GOOGLE_APPLICATION_CREDENTIALS: /opt/gcp.json
|
||||||
|
LITESTREAM_GS_BUCKET: litestream-github-workflows
|
||||||
|
|
||||||
|
abs-integration:
|
||||||
|
name: Azure Blob Storage Integration
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event.inputs.test_cloud_providers == 'true'
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}" ]; then
|
||||||
|
echo "::notice title=Skipped::Azure Blob Storage integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run ABS integration tests
|
||||||
|
id: test
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=abs
|
||||||
|
env:
|
||||||
|
LITESTREAM_ABS_ACCOUNT_NAME: ${{ secrets.LITESTREAM_ABS_ACCOUNT_NAME }}
|
||||||
|
LITESTREAM_ABS_ACCOUNT_KEY: ${{ secrets.LITESTREAM_ABS_ACCOUNT_KEY }}
|
||||||
|
LITESTREAM_ABS_BUCKET: integration
|
||||||
|
|
||||||
|
r2-integration:
|
||||||
|
name: Cloudflare R2 Integration
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event.inputs.test_cloud_providers == 'true'
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_R2_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::R2 integration tests skipped - credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run R2 integration tests
|
||||||
|
id: test
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
run: go test -v ./replica_client_test.go -integration -replica-clients=r2
|
||||||
|
env:
|
||||||
|
LITESTREAM_R2_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_R2_ACCESS_KEY_ID }}
|
||||||
|
LITESTREAM_R2_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_R2_SECRET_ACCESS_KEY }}
|
||||||
|
LITESTREAM_R2_ENDPOINT: ${{ secrets.LITESTREAM_R2_ENDPOINT }}
|
||||||
|
LITESTREAM_R2_BUCKET: ${{ secrets.LITESTREAM_R2_BUCKET }}
|
||||||
|
|
||||||
|
multipart-stress:
|
||||||
|
name: Multipart Upload Stress Tests
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
if: github.event.inputs.test_multipart == 'true'
|
||||||
|
outputs:
|
||||||
|
result: ${{ steps.test.outcome }}
|
||||||
|
steps:
|
||||||
|
- name: Check for required secrets
|
||||||
|
id: check-secrets
|
||||||
|
run: |
|
||||||
|
if [ -z "${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}" ]; then
|
||||||
|
echo "::notice title=Skipped::Multipart stress tests skipped - S3 credentials not configured"
|
||||||
|
echo "has_secrets=false" >> $GITHUB_OUTPUT
|
||||||
|
else
|
||||||
|
echo "has_secrets=true" >> $GITHUB_OUTPUT
|
||||||
|
fi
|
||||||
|
|
||||||
|
- uses: actions/checkout@v4
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
|
||||||
|
- uses: actions/setup-go@v5
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
with:
|
||||||
|
go-version-file: "go.mod"
|
||||||
|
|
||||||
|
- name: Run multipart stress tests
|
||||||
|
id: test
|
||||||
|
if: steps.check-secrets.outputs.has_secrets == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
run: |
|
||||||
|
go test -v ./replica_client_test.go -integration -replica-clients=s3 \
|
||||||
|
-run "TestReplicaClient_S3_Multipart|TestReplicaClient_S3_Concurrency" \
|
||||||
|
-timeout 30m
|
||||||
|
env:
|
||||||
|
LITESTREAM_S3_ACCESS_KEY_ID: ${{ secrets.LITESTREAM_S3_ACCESS_KEY_ID }}
|
||||||
|
LITESTREAM_S3_SECRET_ACCESS_KEY: ${{ secrets.LITESTREAM_S3_SECRET_ACCESS_KEY }}
|
||||||
|
LITESTREAM_S3_REGION: us-east-1
|
||||||
|
LITESTREAM_S3_BUCKET: integration.litestream.io
|
||||||
|
|
||||||
|
generate-checklist:
|
||||||
|
name: Generate Release Checklist
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
needs:
|
||||||
|
- unit-tests
|
||||||
|
- build-verification
|
||||||
|
- s3-integration
|
||||||
|
- gcs-integration
|
||||||
|
- abs-integration
|
||||||
|
- r2-integration
|
||||||
|
- multipart-stress
|
||||||
|
if: always()
|
||||||
|
steps:
|
||||||
|
- name: Generate checklist summary
|
||||||
|
id: checklist
|
||||||
|
run: |
|
||||||
|
echo "## Pre-Release Checklist for ${{ github.event.inputs.version }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "**Date:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "**Triggered by:** @${{ github.actor }}" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
echo "### Core Tests" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
if [ "${{ needs.unit-tests.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Unit tests passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Unit tests **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.build-verification.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Build verification passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Build verification **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "### Cloud Provider Integration Tests" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
if [ "${{ needs.s3-integration.result }}" == "success" ]; then
|
||||||
|
echo "- [x] AWS S3 integration passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.s3-integration.result }}" == "skipped" ]; then
|
||||||
|
echo "- [ ] AWS S3 integration (skipped)" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] AWS S3 integration **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.gcs-integration.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Google Cloud Storage integration passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.gcs-integration.result }}" == "skipped" ]; then
|
||||||
|
echo "- [ ] Google Cloud Storage integration (skipped)" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Google Cloud Storage integration **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.abs-integration.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Azure Blob Storage integration passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.abs-integration.result }}" == "skipped" ]; then
|
||||||
|
echo "- [ ] Azure Blob Storage integration (skipped)" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Azure Blob Storage integration **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
if [ "${{ needs.r2-integration.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Cloudflare R2 integration passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.r2-integration.result }}" == "skipped" ]; then
|
||||||
|
echo "- [ ] Cloudflare R2 integration (skipped)" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Cloudflare R2 integration **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "### Stress Tests" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
if [ "${{ needs.multipart-stress.result }}" == "success" ]; then
|
||||||
|
echo "- [x] Multipart upload stress tests passed" >> $GITHUB_STEP_SUMMARY
|
||||||
|
elif [ "${{ needs.multipart-stress.result }}" == "skipped" ]; then
|
||||||
|
echo "- [ ] Multipart upload stress tests (skipped)" >> $GITHUB_STEP_SUMMARY
|
||||||
|
else
|
||||||
|
echo "- [ ] Multipart upload stress tests **FAILED**" >> $GITHUB_STEP_SUMMARY
|
||||||
|
fi
|
||||||
|
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "---" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "**Note:** This checklist is advisory only." >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "Review failed items before proceeding with the release." >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "" >> $GITHUB_STEP_SUMMARY
|
||||||
|
echo "[View full run details](https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }})" >> $GITHUB_STEP_SUMMARY
|
||||||
|
|
||||||
|
# Create issue body for later use
|
||||||
|
cat > /tmp/issue_body.md << 'ISSUE_EOF'
|
||||||
|
## Pre-Release Checklist for ${{ github.event.inputs.version }}
|
||||||
|
|
||||||
|
**Date:** $(date -u '+%Y-%m-%d %H:%M:%S UTC')
|
||||||
|
**Triggered by:** @${{ github.actor }}
|
||||||
|
**Workflow Run:** https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
|
||||||
|
|
||||||
|
### Core Tests
|
||||||
|
|
||||||
|
| Test | Status |
|
||||||
|
|------|--------|
|
||||||
|
| Unit Tests | ${{ needs.unit-tests.result == 'success' && '✅ Passed' || '❌ Failed' }} |
|
||||||
|
| Build Verification | ${{ needs.build-verification.result == 'success' && '✅ Passed' || '❌ Failed' }} |
|
||||||
|
|
||||||
|
### Cloud Provider Integration Tests
|
||||||
|
|
||||||
|
| Provider | Status |
|
||||||
|
|----------|--------|
|
||||||
|
| AWS S3 | ${{ needs.s3-integration.result == 'success' && '✅ Passed' || needs.s3-integration.result == 'skipped' && '⏭️ Skipped' || '❌ Failed' }} |
|
||||||
|
| Google Cloud Storage | ${{ needs.gcs-integration.result == 'success' && '✅ Passed' || needs.gcs-integration.result == 'skipped' && '⏭️ Skipped' || '❌ Failed' }} |
|
||||||
|
| Azure Blob Storage | ${{ needs.abs-integration.result == 'success' && '✅ Passed' || needs.abs-integration.result == 'skipped' && '⏭️ Skipped' || '❌ Failed' }} |
|
||||||
|
| Cloudflare R2 | ${{ needs.r2-integration.result == 'success' && '✅ Passed' || needs.r2-integration.result == 'skipped' && '⏭️ Skipped' || '❌ Failed' }} |
|
||||||
|
|
||||||
|
### Stress Tests
|
||||||
|
|
||||||
|
| Test | Status |
|
||||||
|
|------|--------|
|
||||||
|
| Multipart Uploads | ${{ needs.multipart-stress.result == 'success' && '✅ Passed' || needs.multipart-stress.result == 'skipped' && '⏭️ Skipped' || '❌ Failed' }} |
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
**Note:** This checklist is advisory only. Review any failed items before proceeding with the release.
|
||||||
|
ISSUE_EOF
|
||||||
|
|
||||||
|
- name: Create GitHub Issue
|
||||||
|
if: github.event.inputs.create_issue == 'true'
|
||||||
|
continue-on-error: true
|
||||||
|
uses: actions/github-script@v7
|
||||||
|
with:
|
||||||
|
script: |
|
||||||
|
const version = '${{ github.event.inputs.version }}';
|
||||||
|
const runUrl = `https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}`;
|
||||||
|
|
||||||
|
// Determine overall status
|
||||||
|
const results = {
|
||||||
|
unit: '${{ needs.unit-tests.result }}',
|
||||||
|
build: '${{ needs.build-verification.result }}',
|
||||||
|
s3: '${{ needs.s3-integration.result }}',
|
||||||
|
gcs: '${{ needs.gcs-integration.result }}',
|
||||||
|
abs: '${{ needs.abs-integration.result }}',
|
||||||
|
r2: '${{ needs.r2-integration.result }}',
|
||||||
|
multipart: '${{ needs.multipart-stress.result }}'
|
||||||
|
};
|
||||||
|
|
||||||
|
const failed = Object.entries(results)
|
||||||
|
.filter(([_, v]) => v === 'failure')
|
||||||
|
.map(([k, _]) => k);
|
||||||
|
|
||||||
|
const status = failed.length === 0 ? '✅' : '⚠️';
|
||||||
|
const title = `${status} Pre-Release Checklist: ${version}`;
|
||||||
|
|
||||||
|
let body = `## Pre-Release Checklist for ${version}\n\n`;
|
||||||
|
body += `**Workflow Run:** [View Details](${runUrl})\n\n`;
|
||||||
|
|
||||||
|
body += `### Results Summary\n\n`;
|
||||||
|
body += `| Test | Status |\n`;
|
||||||
|
body += `|------|--------|\n`;
|
||||||
|
body += `| Unit Tests | ${results.unit === 'success' ? '✅' : '❌'} |\n`;
|
||||||
|
body += `| Build | ${results.build === 'success' ? '✅' : '❌'} |\n`;
|
||||||
|
body += `| AWS S3 | ${results.s3 === 'success' ? '✅' : results.s3 === 'skipped' ? '⏭️' : '❌'} |\n`;
|
||||||
|
body += `| GCS | ${results.gcs === 'success' ? '✅' : results.gcs === 'skipped' ? '⏭️' : '❌'} |\n`;
|
||||||
|
body += `| Azure | ${results.abs === 'success' ? '✅' : results.abs === 'skipped' ? '⏭️' : '❌'} |\n`;
|
||||||
|
body += `| R2 | ${results.r2 === 'success' ? '✅' : results.r2 === 'skipped' ? '⏭️' : '❌'} |\n`;
|
||||||
|
body += `| Multipart | ${results.multipart === 'success' ? '✅' : results.multipart === 'skipped' ? '⏭️' : '❌'} |\n\n`;
|
||||||
|
|
||||||
|
if (failed.length > 0) {
|
||||||
|
body += `### ⚠️ Failed Tests\n\n`;
|
||||||
|
body += `The following tests failed and should be reviewed:\n`;
|
||||||
|
failed.forEach(f => body += `- ${f}\n`);
|
||||||
|
body += `\n`;
|
||||||
|
}
|
||||||
|
|
||||||
|
body += `---\n`;
|
||||||
|
body += `*This issue was automatically created by the pre-release checklist workflow.*\n`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
await github.rest.issues.create({
|
||||||
|
owner: context.repo.owner,
|
||||||
|
repo: context.repo.repo,
|
||||||
|
title: title,
|
||||||
|
body: body,
|
||||||
|
labels: ['release-checklist']
|
||||||
|
});
|
||||||
|
console.log('Created release checklist issue');
|
||||||
|
} catch (error) {
|
||||||
|
console.log(`Failed to create issue: ${error.message}`);
|
||||||
|
}
|
||||||
372
docs/PROVIDER_COMPATIBILITY.md
Normal file
372
docs/PROVIDER_COMPATIBILITY.md
Normal file
@@ -0,0 +1,372 @@
|
|||||||
|
# Storage Provider Compatibility Guide
|
||||||
|
|
||||||
|
This document details S3-compatible storage provider compatibility with Litestream,
|
||||||
|
including known limitations, required configuration, and tested configurations.
|
||||||
|
|
||||||
|
## Overview
|
||||||
|
|
||||||
|
Litestream uses the AWS SDK v2 for S3-compatible storage backends. While most providers
|
||||||
|
implement the S3 API, there are important differences in behavior that can affect
|
||||||
|
Litestream's operation.
|
||||||
|
|
||||||
|
## Provider-Specific Configuration
|
||||||
|
|
||||||
|
### AWS S3 (Default)
|
||||||
|
|
||||||
|
**Status**: Fully supported (primary target)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path
|
||||||
|
region: us-east-1
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes**:
|
||||||
|
|
||||||
|
- No special configuration required
|
||||||
|
- All features fully supported
|
||||||
|
- Checksum validation enabled by default
|
||||||
|
|
||||||
|
### Cloudflare R2
|
||||||
|
|
||||||
|
**Status**: Supported with default configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- Strict concurrent upload limit (2-3 concurrent uploads max)
|
||||||
|
- Does not support `aws-chunked` content encoding
|
||||||
|
- Does not support request/response checksums
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://ACCOUNT_ID.r2.cloudflarestorage.com
|
||||||
|
access-key-id: your-access-key-id
|
||||||
|
secret-access-key: your-secret-access-key
|
||||||
|
```
|
||||||
|
|
||||||
|
**Automatic Defaults** (applied when R2 endpoint detected):
|
||||||
|
|
||||||
|
- `concurrency=2` - Limits concurrent multipart upload parts
|
||||||
|
- Checksums disabled automatically
|
||||||
|
- Force path style enabled
|
||||||
|
|
||||||
|
**Important**: The endpoint must use `https://` scheme for R2 detection to work.
|
||||||
|
|
||||||
|
Related issues: #948, #947, #940, #941
|
||||||
|
|
||||||
|
### Backblaze B2 (S3-Compatible API)
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- Requires signed payloads for all requests
|
||||||
|
- Specific authentication endpoint required
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://s3.REGION.backblazeb2.com&sign-payload=true&force-path-style=true
|
||||||
|
access-key-id: your-key-id
|
||||||
|
secret-access-key: your-application-key
|
||||||
|
```
|
||||||
|
|
||||||
|
**Required Settings**:
|
||||||
|
|
||||||
|
- `sign-payload=true` - Required for B2 authentication
|
||||||
|
- `force-path-style=true` - Required for bucket access
|
||||||
|
- Endpoint format: `https://s3.REGION.backblazeb2.com`
|
||||||
|
|
||||||
|
Related issues: #918, #894
|
||||||
|
|
||||||
|
### DigitalOcean Spaces
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- Does not support `aws-chunked` content encoding
|
||||||
|
- Signature requirements differ from AWS
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://REGION.digitaloceanspaces.com&force-path-style=false
|
||||||
|
access-key-id: your-spaces-key
|
||||||
|
secret-access-key: your-spaces-secret
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes**:
|
||||||
|
|
||||||
|
- Use virtual-hosted style paths (force-path-style=false)
|
||||||
|
- Checksum features disabled automatically for custom endpoints
|
||||||
|
|
||||||
|
Related issues: #943
|
||||||
|
|
||||||
|
### MinIO
|
||||||
|
|
||||||
|
**Status**: Fully supported
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://your-minio-server:9000&force-path-style=true
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
**Notes**:
|
||||||
|
|
||||||
|
- Works well with default settings
|
||||||
|
- Force path style recommended for single-server deployments
|
||||||
|
|
||||||
|
### Scaleway Object Storage
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- `MissingContentLength` errors with streaming uploads
|
||||||
|
- Requires Content-Length header
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://s3.REGION.scw.cloud&force-path-style=true
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
Related issues: #912
|
||||||
|
|
||||||
|
### Hetzner Object Storage
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- `InvalidArgument` errors with default AWS SDK settings
|
||||||
|
- Does not support `aws-chunked` content encoding
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://REGION.your-objectstorage.com&force-path-style=true
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
### Filebase
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Known Limitations**:
|
||||||
|
|
||||||
|
- Authentication failures with default SDK settings after SDK v2 migration
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://s3.filebase.com&force-path-style=true
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
### Tigris
|
||||||
|
|
||||||
|
**Status**: Supported with configuration
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://fly.storage.tigris.dev&force-path-style=true
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
### Wasabi
|
||||||
|
|
||||||
|
**Status**: Supported
|
||||||
|
|
||||||
|
**Configuration**:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: s3://bucket-name/path?endpoint=https://s3.REGION.wasabisys.com
|
||||||
|
access-key-id: your-access-key
|
||||||
|
secret-access-key: your-secret-key
|
||||||
|
```
|
||||||
|
|
||||||
|
## Google Cloud Storage (GCS)
|
||||||
|
|
||||||
|
**Status**: Fully supported (native client)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: gcs://bucket-name/path
|
||||||
|
```
|
||||||
|
|
||||||
|
**Authentication**:
|
||||||
|
|
||||||
|
- Uses Application Default Credentials
|
||||||
|
- Set `GOOGLE_APPLICATION_CREDENTIALS` environment variable
|
||||||
|
- Or use workload identity on GCP
|
||||||
|
|
||||||
|
## Azure Blob Storage (ABS)
|
||||||
|
|
||||||
|
**Status**: Fully supported (native client)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: abs://container-name/path
|
||||||
|
account-name: your-account-name
|
||||||
|
account-key: your-account-key
|
||||||
|
```
|
||||||
|
|
||||||
|
**Alternative Authentication**:
|
||||||
|
|
||||||
|
- Connection string: `AZURE_STORAGE_CONNECTION_STRING`
|
||||||
|
- Managed identity on Azure
|
||||||
|
|
||||||
|
## Alibaba Cloud OSS
|
||||||
|
|
||||||
|
**Status**: Supported (native client)
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: oss://bucket-name/path?endpoint=oss-REGION.aliyuncs.com
|
||||||
|
access-key-id: your-access-key-id
|
||||||
|
access-key-secret: your-access-key-secret
|
||||||
|
```
|
||||||
|
|
||||||
|
## SFTP
|
||||||
|
|
||||||
|
**Status**: Supported
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
replicas:
|
||||||
|
- url: sftp://hostname/path
|
||||||
|
user: username
|
||||||
|
password: password # or use key-path
|
||||||
|
```
|
||||||
|
|
||||||
|
## Configuration Reference
|
||||||
|
|
||||||
|
### S3 Query Parameters
|
||||||
|
|
||||||
|
| Parameter | Description | Default |
|
||||||
|
|-----------|-------------|---------|
|
||||||
|
| `endpoint` | Custom S3 endpoint URL | AWS S3 |
|
||||||
|
| `region` | AWS region | Auto-detected |
|
||||||
|
| `force-path-style` | Use path-style URLs | `false` (auto for custom endpoints) |
|
||||||
|
| `sign-payload` | Sign request payloads | `false` |
|
||||||
|
| `skip-verify` | Skip TLS verification | `false` |
|
||||||
|
| `concurrency` | Multipart upload concurrency | `5` (2 for R2) |
|
||||||
|
| `part-size` | Multipart upload part size | `5MB` |
|
||||||
|
| `sse-algorithm` | Server-side encryption | None |
|
||||||
|
| `sse-kms-key-id` | KMS key for encryption | None |
|
||||||
|
| `sse-customer-key` | Customer-provided encryption key | None |
|
||||||
|
|
||||||
|
### Provider Detection
|
||||||
|
|
||||||
|
Litestream automatically detects certain providers and applies appropriate defaults:
|
||||||
|
|
||||||
|
| Provider | Detection Pattern | Applied Settings |
|
||||||
|
|----------|-------------------|------------------|
|
||||||
|
| Cloudflare R2 | `*.r2.cloudflarestorage.com` | `concurrency=2`, checksums disabled |
|
||||||
|
| Backblaze B2 | `*.backblazeb2.com` | `sign-payload=true`, `force-path-style=true` |
|
||||||
|
| DigitalOcean | `*.digitaloceanspaces.com` | `force-path-style=false` |
|
||||||
|
| Scaleway | `*.scw.cloud` | `force-path-style=true` |
|
||||||
|
| Filebase | `s3.filebase.com` | `force-path-style=true` |
|
||||||
|
| Tigris | `*.tigris.dev` | `force-path-style=true` |
|
||||||
|
| MinIO | `minio` in hostname | `force-path-style=true` |
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Errors
|
||||||
|
|
||||||
|
**`InvalidArgument: Unsupported content encoding: aws-chunked`**
|
||||||
|
|
||||||
|
- Provider doesn't support AWS SDK v2 chunked encoding
|
||||||
|
- Use a custom endpoint with automatic checksum disabling
|
||||||
|
- Or explicitly disable checksums
|
||||||
|
|
||||||
|
**`SignatureDoesNotMatch`**
|
||||||
|
|
||||||
|
- Try `sign-payload=true` in the URL
|
||||||
|
- Verify credentials are correct
|
||||||
|
- Check endpoint URL format
|
||||||
|
|
||||||
|
**`MissingContentLength`**
|
||||||
|
|
||||||
|
- Provider requires Content-Length header
|
||||||
|
- This is handled automatically for known providers
|
||||||
|
|
||||||
|
**`Too many concurrent uploads` or timeout errors**
|
||||||
|
|
||||||
|
- Reduce concurrency: `?concurrency=2`
|
||||||
|
- Particularly important for Cloudflare R2
|
||||||
|
|
||||||
|
**`AccessDenied` or authentication failures**
|
||||||
|
|
||||||
|
- Verify credentials
|
||||||
|
- Check IAM/bucket permissions
|
||||||
|
- For B2, ensure `sign-payload=true`
|
||||||
|
|
||||||
|
### Debug Mode
|
||||||
|
|
||||||
|
Enable verbose logging to diagnose issues:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
LITESTREAM_DEBUG=1 litestream replicate ...
|
||||||
|
```
|
||||||
|
|
||||||
|
Or in configuration:
|
||||||
|
|
||||||
|
```yaml
|
||||||
|
logging:
|
||||||
|
level: debug
|
||||||
|
```
|
||||||
|
|
||||||
|
## Testing Your Configuration
|
||||||
|
|
||||||
|
Test connectivity without starting replication:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# List any existing backups
|
||||||
|
litestream snapshots s3://bucket/path?endpoint=...
|
||||||
|
|
||||||
|
# Perform a test restore (requires existing backup)
|
||||||
|
litestream restore -o /tmp/test.db s3://bucket/path?endpoint=...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Version Compatibility
|
||||||
|
|
||||||
|
- **Litestream v0.5.x**: AWS SDK v2, improved provider compatibility
|
||||||
|
- **Litestream v0.4.x**: AWS SDK v1, different authentication handling
|
||||||
|
- **Litestream v0.3.x**: Legacy format, not compatible with v0.5.x restores
|
||||||
|
|
||||||
|
When upgrading from v0.3.x, be aware that v0.5.x uses a different backup format
|
||||||
|
and cannot restore backups created by v0.3.x. See the upgrade guide for migration
|
||||||
|
instructions.
|
||||||
|
|
||||||
|
## Reporting Issues
|
||||||
|
|
||||||
|
When reporting provider compatibility issues, please include:
|
||||||
|
|
||||||
|
1. Provider name and region
|
||||||
|
2. Litestream version (`litestream version`)
|
||||||
|
3. Full error message
|
||||||
|
4. Configuration (with credentials redacted)
|
||||||
|
5. Whether the issue is with replication, restore, or both
|
||||||
|
|
||||||
|
File issues at: [GitHub Issues](https://github.com/benbjohnson/litestream/issues)
|
||||||
@@ -73,6 +73,14 @@ var (
|
|||||||
r2Bucket = flag.String("r2-bucket", os.Getenv("LITESTREAM_R2_BUCKET"), "")
|
r2Bucket = flag.String("r2-bucket", os.Getenv("LITESTREAM_R2_BUCKET"), "")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Backblaze B2 settings (S3-compatible)
|
||||||
|
var (
|
||||||
|
b2KeyID = flag.String("b2-key-id", os.Getenv("LITESTREAM_B2_KEY_ID"), "")
|
||||||
|
b2ApplicationKey = flag.String("b2-application-key", os.Getenv("LITESTREAM_B2_APPLICATION_KEY"), "")
|
||||||
|
b2Endpoint = flag.String("b2-endpoint", os.Getenv("LITESTREAM_B2_ENDPOINT"), "")
|
||||||
|
b2Bucket = flag.String("b2-bucket", os.Getenv("LITESTREAM_B2_BUCKET"), "")
|
||||||
|
)
|
||||||
|
|
||||||
// Google cloud storage settings
|
// Google cloud storage settings
|
||||||
var (
|
var (
|
||||||
gsBucket = flag.String("gs-bucket", os.Getenv("LITESTREAM_GS_BUCKET"), "")
|
gsBucket = flag.String("gs-bucket", os.Getenv("LITESTREAM_GS_BUCKET"), "")
|
||||||
@@ -241,6 +249,8 @@ func NewReplicaClient(tb testing.TB, typ string) litestream.ReplicaClient {
|
|||||||
return NewTigrisReplicaClient(tb)
|
return NewTigrisReplicaClient(tb)
|
||||||
case "r2":
|
case "r2":
|
||||||
return NewR2ReplicaClient(tb)
|
return NewR2ReplicaClient(tb)
|
||||||
|
case "b2":
|
||||||
|
return NewB2ReplicaClient(tb)
|
||||||
default:
|
default:
|
||||||
tb.Fatalf("invalid replica client type: %q", typ)
|
tb.Fatalf("invalid replica client type: %q", typ)
|
||||||
return nil
|
return nil
|
||||||
@@ -274,7 +284,7 @@ func NewTigrisReplicaClient(tb testing.TB) *s3.ReplicaClient {
|
|||||||
tb.Helper()
|
tb.Helper()
|
||||||
|
|
||||||
if *tigrisAccessKeyID == "" || *tigrisSecretAccessKey == "" {
|
if *tigrisAccessKeyID == "" || *tigrisSecretAccessKey == "" {
|
||||||
tb.Fatalf("tigris credentials not configured (set LITESTREAM_TIGRIS_ACCESS_KEY_ID/SECRET_ACCESS_KEY)")
|
tb.Skip("tigris credentials not configured (set LITESTREAM_TIGRIS_ACCESS_KEY_ID/SECRET_ACCESS_KEY)")
|
||||||
}
|
}
|
||||||
|
|
||||||
c := s3.NewReplicaClient()
|
c := s3.NewReplicaClient()
|
||||||
@@ -316,6 +326,33 @@ func NewR2ReplicaClient(tb testing.TB) *s3.ReplicaClient {
|
|||||||
return c
|
return c
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewB2ReplicaClient returns a new Backblaze B2 client for integration testing.
|
||||||
|
// B2 uses S3-compatible API with path-style URLs and signed payloads.
|
||||||
|
func NewB2ReplicaClient(tb testing.TB) *s3.ReplicaClient {
|
||||||
|
tb.Helper()
|
||||||
|
|
||||||
|
if *b2KeyID == "" || *b2ApplicationKey == "" {
|
||||||
|
tb.Skip("b2 credentials not configured (set LITESTREAM_B2_KEY_ID/APPLICATION_KEY)")
|
||||||
|
}
|
||||||
|
if *b2Endpoint == "" {
|
||||||
|
tb.Skip("b2 endpoint not configured (set LITESTREAM_B2_ENDPOINT)")
|
||||||
|
}
|
||||||
|
if *b2Bucket == "" {
|
||||||
|
tb.Skip("b2 bucket not configured (set LITESTREAM_B2_BUCKET)")
|
||||||
|
}
|
||||||
|
|
||||||
|
c := s3.NewReplicaClient()
|
||||||
|
c.AccessKeyID = *b2KeyID
|
||||||
|
c.SecretAccessKey = *b2ApplicationKey
|
||||||
|
c.Region = "us-west-002" // B2 uses region in endpoint format
|
||||||
|
c.Bucket = *b2Bucket
|
||||||
|
c.Path = path.Join("integration-tests", fmt.Sprintf("%016x", rand.Uint64()))
|
||||||
|
c.Endpoint = *b2Endpoint
|
||||||
|
c.ForcePathStyle = true
|
||||||
|
c.SignPayload = true
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
// NewGSReplicaClient returns a new client for integration testing.
|
// NewGSReplicaClient returns a new client for integration testing.
|
||||||
func NewGSReplicaClient(tb testing.TB) *gs.ReplicaClient {
|
func NewGSReplicaClient(tb testing.TB) *gs.ReplicaClient {
|
||||||
tb.Helper()
|
tb.Helper()
|
||||||
|
|||||||
@@ -544,3 +544,418 @@ AAAEDzV1D6COyvFGhSiZa6ll9aXZ2IMWED3KGrvCNjEEtYHwnK0+GdwOelXlAXdqLx/qvS
|
|||||||
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_S3_MultipartThresholds tests multipart upload behavior at various
|
||||||
|
// size thresholds. These tests are critical for catching S3-compatible provider issues
|
||||||
|
// like #940, #941, #947 where multipart uploads fail with certain providers.
|
||||||
|
//
|
||||||
|
// NOTE: These tests skip moto due to multipart checksum validation issues (moto#8762).
|
||||||
|
// They should be run against real cloud providers using the manual integration workflow.
|
||||||
|
func TestReplicaClient_S3_MultipartThresholds(t *testing.T) {
|
||||||
|
if !slices.Contains(testingutil.ReplicaClientTypes(), "s3") {
|
||||||
|
t.Skip("Skipping S3-specific multipart threshold tests")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip if using mock endpoint (moto has multipart checksum issues)
|
||||||
|
if endpoint := os.Getenv("LITESTREAM_S3_ENDPOINT"); endpoint != "" {
|
||||||
|
if strings.Contains(endpoint, "127.0.0.1") || strings.Contains(endpoint, "localhost") {
|
||||||
|
t.Skip("Skipping multipart tests with mock endpoint (moto has checksum issues)")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
sizeMB int
|
||||||
|
partSize int64
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "AtThreshold_5MB",
|
||||||
|
sizeMB: 5,
|
||||||
|
partSize: 5 * 1024 * 1024,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "AboveThreshold_10MB",
|
||||||
|
sizeMB: 10,
|
||||||
|
partSize: 5 * 1024 * 1024,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Large_50MB",
|
||||||
|
sizeMB: 50,
|
||||||
|
partSize: 10 * 1024 * 1024,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if !testingutil.Integration() {
|
||||||
|
t.Skip("skipping integration test, use -integration flag to run")
|
||||||
|
}
|
||||||
|
|
||||||
|
c := testingutil.NewS3ReplicaClient(t)
|
||||||
|
c.Path = fmt.Sprintf("multipart-test/%016x", rand.Uint64())
|
||||||
|
c.PartSize = tt.partSize
|
||||||
|
c.Concurrency = 3
|
||||||
|
defer testingutil.MustDeleteAll(t, c)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
t.Fatalf("Init() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
size := tt.sizeMB * 1024 * 1024
|
||||||
|
payload := make([]byte, size)
|
||||||
|
for i := range payload {
|
||||||
|
payload[i] = byte(i % 256)
|
||||||
|
}
|
||||||
|
ltxData := createLTXData(1, 100, payload)
|
||||||
|
|
||||||
|
t.Logf("Testing %dMB file with %dMB parts", tt.sizeMB, tt.partSize/(1024*1024))
|
||||||
|
|
||||||
|
if _, err := c.WriteLTXFile(ctx, 0, ltx.TXID(1), ltx.TXID(100), bytes.NewReader(ltxData)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := c.OpenLTXFile(ctx, 0, ltx.TXID(1), ltx.TXID(100), 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("OpenLTXFile() error: %v", err)
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
buf, err := io.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ReadAll() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(buf) != len(ltxData) {
|
||||||
|
t.Errorf("size mismatch: got %d, want %d", len(buf), len(ltxData))
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(buf, ltxData) {
|
||||||
|
t.Errorf("data mismatch: uploaded and downloaded data do not match")
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_S3_ConcurrencyLimits tests that concurrency limits are respected
|
||||||
|
// during multipart uploads. This is important for providers like Cloudflare R2 that
|
||||||
|
// have strict concurrent upload limits (issue #948).
|
||||||
|
func TestReplicaClient_S3_ConcurrencyLimits(t *testing.T) {
|
||||||
|
if !slices.Contains(testingutil.ReplicaClientTypes(), "s3") {
|
||||||
|
t.Skip("Skipping S3-specific concurrency test")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip if using mock endpoint
|
||||||
|
if endpoint := os.Getenv("LITESTREAM_S3_ENDPOINT"); endpoint != "" {
|
||||||
|
if strings.Contains(endpoint, "127.0.0.1") || strings.Contains(endpoint, "localhost") {
|
||||||
|
t.Skip("Skipping concurrency test with mock endpoint")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !testingutil.Integration() {
|
||||||
|
t.Skip("skipping integration test, use -integration flag to run")
|
||||||
|
}
|
||||||
|
|
||||||
|
concurrencyLevels := []int{1, 2, 5}
|
||||||
|
|
||||||
|
for _, concurrency := range concurrencyLevels {
|
||||||
|
t.Run(fmt.Sprintf("Concurrency_%d", concurrency), func(t *testing.T) {
|
||||||
|
c := testingutil.NewS3ReplicaClient(t)
|
||||||
|
c.Path = fmt.Sprintf("concurrency-test/%016x", rand.Uint64())
|
||||||
|
c.PartSize = 5 * 1024 * 1024
|
||||||
|
c.Concurrency = concurrency
|
||||||
|
defer testingutil.MustDeleteAll(t, c)
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
if err := c.Init(ctx); err != nil {
|
||||||
|
t.Fatalf("Init() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
size := 15 * 1024 * 1024
|
||||||
|
payload := make([]byte, size)
|
||||||
|
for i := range payload {
|
||||||
|
payload[i] = byte(i % 256)
|
||||||
|
}
|
||||||
|
ltxData := createLTXData(1, 100, payload)
|
||||||
|
|
||||||
|
t.Logf("Testing 15MB file with concurrency=%d", concurrency)
|
||||||
|
|
||||||
|
if _, err := c.WriteLTXFile(ctx, 0, ltx.TXID(1), ltx.TXID(100), bytes.NewReader(ltxData)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile() with concurrency=%d error: %v", concurrency, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
r, err := c.OpenLTXFile(ctx, 0, ltx.TXID(1), ltx.TXID(100), 0, 0)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("OpenLTXFile() error: %v", err)
|
||||||
|
}
|
||||||
|
defer r.Close()
|
||||||
|
|
||||||
|
buf, err := io.ReadAll(r)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("ReadAll() error: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if !bytes.Equal(buf, ltxData) {
|
||||||
|
t.Errorf("data mismatch at concurrency=%d", concurrency)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_PITR_ManyLTXFiles tests point-in-time restore with many LTX files.
|
||||||
|
// This is a regression test for issue #930 where HeadObject calls with 100+ LTX files
|
||||||
|
// caused the restore operation to hang.
|
||||||
|
func TestReplicaClient_PITR_ManyLTXFiles(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
fileCount int
|
||||||
|
timeout time.Duration
|
||||||
|
}{
|
||||||
|
{"100_Files", 100, 2 * time.Minute},
|
||||||
|
{"500_Files", 500, 5 * time.Minute},
|
||||||
|
{"1000_Files", 1000, 10 * time.Minute},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
RunWithReplicaClient(t, tt.name, func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
// Skip very long tests unless explicitly enabled
|
||||||
|
if tt.fileCount > 100 && os.Getenv("LITESTREAM_PITR_STRESS_TEST") == "" {
|
||||||
|
t.Skipf("Skipping %d file stress test (set LITESTREAM_PITR_STRESS_TEST=1 to enable)", tt.fileCount)
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), tt.timeout)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
baseTime := time.Now().Add(-time.Duration(tt.fileCount) * time.Minute)
|
||||||
|
t.Logf("Creating %d LTX files starting from %v", tt.fileCount, baseTime)
|
||||||
|
|
||||||
|
// Create snapshot at TXID 1
|
||||||
|
snapshot := createLTXDataWithTimestamp(1, 1, baseTime, []byte("snapshot"))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(snapshot): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create many L0 files with incrementing timestamps
|
||||||
|
for i := 2; i <= tt.fileCount; i++ {
|
||||||
|
ts := baseTime.Add(time.Duration(i-1) * time.Minute)
|
||||||
|
data := createLTXDataWithTimestamp(ltx.TXID(i), ltx.TXID(i), ts, []byte(fmt.Sprintf("file-%d", i)))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(%d): %v", i, err)
|
||||||
|
}
|
||||||
|
if i%100 == 0 {
|
||||||
|
t.Logf("Created %d/%d files", i, tt.fileCount)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 1: Iterate all L0 files without metadata (fast path)
|
||||||
|
t.Log("Testing L0 file iteration without metadata")
|
||||||
|
startFast := time.Now()
|
||||||
|
itr, err := c.LTXFiles(ctx, 0, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("LTXFiles(useMetadata=false): %v", err)
|
||||||
|
}
|
||||||
|
var countFast int
|
||||||
|
for itr.Next() {
|
||||||
|
countFast++
|
||||||
|
}
|
||||||
|
if err := itr.Close(); err != nil {
|
||||||
|
t.Fatalf("Iterator close: %v", err)
|
||||||
|
}
|
||||||
|
durationFast := time.Since(startFast)
|
||||||
|
t.Logf("Fast iteration: %d files in %v", countFast, durationFast)
|
||||||
|
|
||||||
|
if countFast != tt.fileCount-1 {
|
||||||
|
t.Errorf("Fast iteration count: got %d, want %d", countFast, tt.fileCount-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test 2: Iterate all L0 files with metadata (required for PITR)
|
||||||
|
// This is the path that was hanging in issue #930
|
||||||
|
t.Log("Testing L0 file iteration with metadata (PITR path)")
|
||||||
|
startMeta := time.Now()
|
||||||
|
itrMeta, err := c.LTXFiles(ctx, 0, 0, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("LTXFiles(useMetadata=true): %v", err)
|
||||||
|
}
|
||||||
|
var countMeta int
|
||||||
|
for itrMeta.Next() {
|
||||||
|
countMeta++
|
||||||
|
}
|
||||||
|
if err := itrMeta.Close(); err != nil {
|
||||||
|
t.Fatalf("Iterator close: %v", err)
|
||||||
|
}
|
||||||
|
durationMeta := time.Since(startMeta)
|
||||||
|
t.Logf("Metadata iteration: %d files in %v", countMeta, durationMeta)
|
||||||
|
|
||||||
|
if countMeta != tt.fileCount-1 {
|
||||||
|
t.Errorf("Metadata iteration count: got %d, want %d", countMeta, tt.fileCount-1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify metadata iteration completed within reasonable time
|
||||||
|
// (issue #930 caused this to hang indefinitely)
|
||||||
|
if durationMeta > tt.timeout/2 {
|
||||||
|
t.Errorf("Metadata iteration took too long: %v (should be < %v)", durationMeta, tt.timeout/2)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_PITR_TimestampFiltering tests that PITR correctly filters files
|
||||||
|
// by timestamp across a range of LTX files.
|
||||||
|
func TestReplicaClient_PITR_TimestampFiltering(t *testing.T) {
|
||||||
|
RunWithReplicaClient(t, "TimestampFilter", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
fileCount := 50
|
||||||
|
baseTime := time.Now().Add(-time.Duration(fileCount) * time.Minute)
|
||||||
|
|
||||||
|
// Create snapshot at TXID 1
|
||||||
|
snapshot := createLTXDataWithTimestamp(1, 1, baseTime, []byte("snapshot"))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(snapshot): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create L0 files with known timestamps
|
||||||
|
for i := 2; i <= fileCount; i++ {
|
||||||
|
ts := baseTime.Add(time.Duration(i-1) * time.Minute)
|
||||||
|
data := createLTXDataWithTimestamp(ltx.TXID(i), ltx.TXID(i), ts, []byte(fmt.Sprintf("file-%d", i)))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(%d): %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test filtering at various timestamp points
|
||||||
|
testPoints := []struct {
|
||||||
|
name string
|
||||||
|
offsetMins int
|
||||||
|
expectCount int
|
||||||
|
}{
|
||||||
|
{"Beginning", 5, 4}, // Files 2-5 (4 files)
|
||||||
|
{"Quarter", 12, 11}, // Files 2-12 (11 files)
|
||||||
|
{"Middle", 25, 24}, // Files 2-25 (24 files)
|
||||||
|
{"ThreeQuarters", 37, 36}, // Files 2-37 (36 files)
|
||||||
|
{"End", fileCount - 1, fileCount - 2}, // All but last
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tp := range testPoints {
|
||||||
|
t.Run(tp.name, func(t *testing.T) {
|
||||||
|
targetTime := baseTime.Add(time.Duration(tp.offsetMins) * time.Minute)
|
||||||
|
t.Logf("Filtering files before %v (offset: %d mins)", targetTime, tp.offsetMins)
|
||||||
|
|
||||||
|
// Use LTXFiles with metadata to get accurate timestamps
|
||||||
|
itr, err := c.LTXFiles(ctx, 0, 0, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("LTXFiles: %v", err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
for itr.Next() {
|
||||||
|
info := itr.Item()
|
||||||
|
if info.CreatedAt.Before(targetTime) {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Allow for timestamp precision variance
|
||||||
|
if count < tp.expectCount-1 || count > tp.expectCount+1 {
|
||||||
|
t.Errorf("Files before %v: got %d, expected ~%d", targetTime, count, tp.expectCount)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_PITR_CalcRestorePlanWithManyFiles tests CalcRestorePlan with
|
||||||
|
// a large number of LTX files. This ensures restore planning doesn't hang.
|
||||||
|
func TestReplicaClient_PITR_CalcRestorePlanWithManyFiles(t *testing.T) {
|
||||||
|
db, sqldb := testingutil.MustOpenDBs(t)
|
||||||
|
defer testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
RunWithReplicaClient(t, "RestorePlan", func(t *testing.T, c litestream.ReplicaClient) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
fileCount := 100
|
||||||
|
baseTime := time.Now().Add(-time.Duration(fileCount) * time.Minute)
|
||||||
|
|
||||||
|
// Create snapshot
|
||||||
|
snapshot := createLTXDataWithTimestamp(1, 1, baseTime, []byte("snapshot"))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(snapshot): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create L0 files
|
||||||
|
for i := 2; i <= fileCount; i++ {
|
||||||
|
ts := baseTime.Add(time.Duration(i-1) * time.Minute)
|
||||||
|
data := createLTXDataWithTimestamp(ltx.TXID(i), ltx.TXID(i), ts, []byte(fmt.Sprintf("file-%d", i)))
|
||||||
|
if _, err := c.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("WriteLTXFile(%d): %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test restore plan calculation at various points
|
||||||
|
testTargets := []struct {
|
||||||
|
name string
|
||||||
|
txID ltx.TXID
|
||||||
|
minFiles int
|
||||||
|
}{
|
||||||
|
{"EarlyTXID", 10, 2}, // snapshot + some L0
|
||||||
|
{"MidTXID", 50, 2}, // snapshot + more L0
|
||||||
|
{"LateTXID", 90, 2}, // snapshot + most L0
|
||||||
|
{"LatestTXID", ltx.TXID(fileCount), 2}, // all files
|
||||||
|
}
|
||||||
|
|
||||||
|
logger := slog.Default()
|
||||||
|
|
||||||
|
for _, target := range testTargets {
|
||||||
|
t.Run(target.name, func(t *testing.T) {
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
plan, err := litestream.CalcRestorePlan(ctx, c, target.txID, time.Time{}, logger)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("CalcRestorePlan(%d): %v", target.txID, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
t.Logf("CalcRestorePlan(txid=%d): %d files in %v", target.txID, len(plan), duration)
|
||||||
|
|
||||||
|
if len(plan) < target.minFiles {
|
||||||
|
t.Errorf("Plan has too few files: got %d, want >= %d", len(plan), target.minFiles)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify plan doesn't take excessively long
|
||||||
|
if duration > 30*time.Second {
|
||||||
|
t.Errorf("CalcRestorePlan took too long: %v (should be < 30s)", duration)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test timestamp-based restore plan
|
||||||
|
t.Run("TimestampBased", func(t *testing.T) {
|
||||||
|
// Target halfway through the files
|
||||||
|
targetTime := baseTime.Add(time.Duration(fileCount/2) * time.Minute)
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
plan, err := litestream.CalcRestorePlan(ctx, c, 0, targetTime, logger)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("CalcRestorePlan(timestamp=%v): %v", targetTime, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
duration := time.Since(startTime)
|
||||||
|
t.Logf("CalcRestorePlan(timestamp=%v): %d files in %v", targetTime, len(plan), duration)
|
||||||
|
|
||||||
|
if len(plan) < 2 {
|
||||||
|
t.Errorf("Plan has too few files: got %d, want >= 2", len(plan))
|
||||||
|
}
|
||||||
|
|
||||||
|
if duration > 60*time.Second {
|
||||||
|
t.Errorf("Timestamp-based CalcRestorePlan took too long: %v", duration)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -951,6 +951,100 @@ func TestIsLocalEndpoint(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestS3ProviderDefaults tests that provider-specific defaults are applied
|
||||||
|
// when creating S3 clients from URLs with provider endpoints.
|
||||||
|
// These tests ensure edge case bugs like #912, #918, #940, #947 don't regress.
|
||||||
|
func TestS3ProviderDefaults(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
url string
|
||||||
|
wantSignPayload bool
|
||||||
|
wantForcePathStyle bool
|
||||||
|
wantRequireMD5 bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "CloudflareR2_SignPayload",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://account123.r2.cloudflarestorage.com",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true, // Custom endpoint default
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "BackblazeB2_SignPayloadAndPathStyle",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://s3.us-west-002.backblazeb2.com",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true,
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "DigitalOcean_SignPayload",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://sfo3.digitaloceanspaces.com",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true, // Custom endpoint default
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Scaleway_SignPayload",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://s3.fr-par.scw.cloud",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true, // Custom endpoint default
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Filebase_SignPayloadAndPathStyle",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://s3.filebase.com",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true,
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "Tigris_SignPayloadNoMD5",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://fly.storage.tigris.dev",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true, // Custom endpoint default
|
||||||
|
wantRequireMD5: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MinIO_SignPayloadAndPathStyle",
|
||||||
|
url: "s3://mybucket/path?endpoint=http://localhost:9000",
|
||||||
|
wantSignPayload: true,
|
||||||
|
wantForcePathStyle: true,
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "AWS_Defaults",
|
||||||
|
url: "s3://mybucket/path",
|
||||||
|
wantSignPayload: true, // Default
|
||||||
|
wantForcePathStyle: false,
|
||||||
|
wantRequireMD5: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
client, err := litestream.NewReplicaClientFromURL(tt.url)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("NewReplicaClientFromURL(%q) error: %v", tt.url, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
s3Client, ok := client.(*s3.ReplicaClient)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("expected *s3.ReplicaClient, got %T", client)
|
||||||
|
}
|
||||||
|
|
||||||
|
if s3Client.SignPayload != tt.wantSignPayload {
|
||||||
|
t.Errorf("SignPayload = %v, want %v", s3Client.SignPayload, tt.wantSignPayload)
|
||||||
|
}
|
||||||
|
if s3Client.ForcePathStyle != tt.wantForcePathStyle {
|
||||||
|
t.Errorf("ForcePathStyle = %v, want %v", s3Client.ForcePathStyle, tt.wantForcePathStyle)
|
||||||
|
}
|
||||||
|
if s3Client.RequireContentMD5 != tt.wantRequireMD5 {
|
||||||
|
t.Errorf("RequireContentMD5 = %v, want %v", s3Client.RequireContentMD5, tt.wantRequireMD5)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestEnsureEndpointScheme(t *testing.T) {
|
func TestEnsureEndpointScheme(t *testing.T) {
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
input string
|
input string
|
||||||
@@ -992,3 +1086,40 @@ func TestEnsureEndpointScheme(t *testing.T) {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestS3ProviderDefaults_QueryParamOverrides tests that explicit query parameters
|
||||||
|
// override provider-specific defaults.
|
||||||
|
func TestS3ProviderDefaults_QueryParamOverrides(t *testing.T) {
|
||||||
|
t.Run("SignPayload_ExplicitFalse", func(t *testing.T) {
|
||||||
|
client, err := litestream.NewReplicaClientFromURL("s3://mybucket/path?endpoint=https://account123.r2.cloudflarestorage.com&sign-payload=false")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
s3Client := client.(*s3.ReplicaClient)
|
||||||
|
if s3Client.SignPayload != false {
|
||||||
|
t.Errorf("SignPayload = %v, want false (explicit override)", s3Client.SignPayload)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("ForcePathStyle_ExplicitFalse", func(t *testing.T) {
|
||||||
|
client, err := litestream.NewReplicaClientFromURL("s3://mybucket/path?endpoint=https://s3.us-west-002.backblazeb2.com&forcePathStyle=false")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
s3Client := client.(*s3.ReplicaClient)
|
||||||
|
if s3Client.ForcePathStyle != false {
|
||||||
|
t.Errorf("ForcePathStyle = %v, want false (explicit override)", s3Client.ForcePathStyle)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
t.Run("RequireMD5_ExplicitTrue_Tigris", func(t *testing.T) {
|
||||||
|
client, err := litestream.NewReplicaClientFromURL("s3://mybucket/path?endpoint=https://fly.storage.tigris.dev&require-content-md5=true")
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
s3Client := client.(*s3.ReplicaClient)
|
||||||
|
if s3Client.RequireContentMD5 != true {
|
||||||
|
t.Errorf("RequireContentMD5 = %v, want true (explicit override)", s3Client.RequireContentMD5)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|||||||
@@ -23,6 +23,8 @@ import (
|
|||||||
"github.com/aws/smithy-go/middleware"
|
"github.com/aws/smithy-go/middleware"
|
||||||
smithyhttp "github.com/aws/smithy-go/transport/http"
|
smithyhttp "github.com/aws/smithy-go/transport/http"
|
||||||
"github.com/superfly/ltx"
|
"github.com/superfly/ltx"
|
||||||
|
|
||||||
|
litestream "github.com/benbjohnson/litestream"
|
||||||
)
|
)
|
||||||
|
|
||||||
// mockAPIError implements smithy.APIError for testing
|
// mockAPIError implements smithy.APIError for testing
|
||||||
@@ -1658,3 +1660,160 @@ func TestReplicaClient_NoSSE_Headers(t *testing.T) {
|
|||||||
t.Fatal("timeout waiting for PUT request")
|
t.Fatal("timeout waiting for PUT request")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_R2ConcurrencyDefault tests that Cloudflare R2 endpoints get
|
||||||
|
// Concurrency=2 by default to avoid their strict concurrent upload limits.
|
||||||
|
// This is a regression test for issue #948.
|
||||||
|
//
|
||||||
|
// NOTE: This test is skipped until issue #948 is fixed. Once the fix is merged,
|
||||||
|
// remove the t.Skip() call and the test should pass.
|
||||||
|
func TestReplicaClient_R2ConcurrencyDefault(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
url string
|
||||||
|
wantConcurrency int
|
||||||
|
skipReason string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "R2_DefaultConcurrency",
|
||||||
|
url: "s3://mybucket/path?endpoint=https://account123.r2.cloudflarestorage.com",
|
||||||
|
wantConcurrency: 2,
|
||||||
|
skipReason: "pending issue #948 fix",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "AWS_NoConcurrencyOverride",
|
||||||
|
url: "s3://mybucket/path",
|
||||||
|
wantConcurrency: 0,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MinIO_NoConcurrencyOverride",
|
||||||
|
url: "s3://mybucket/path?endpoint=http://localhost:9000",
|
||||||
|
wantConcurrency: 0,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if tt.skipReason != "" {
|
||||||
|
t.Skip(tt.skipReason)
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := litestream.NewReplicaClientFromURL(tt.url)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("NewReplicaClientFromURL() error: %v", err)
|
||||||
|
}
|
||||||
|
c := client.(*ReplicaClient)
|
||||||
|
|
||||||
|
if c.Concurrency != tt.wantConcurrency {
|
||||||
|
t.Errorf("Concurrency = %d, want %d", c.Concurrency, tt.wantConcurrency)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_ProviderEndpointDetection tests the endpoint detection functions
|
||||||
|
// used to apply provider-specific defaults.
|
||||||
|
func TestReplicaClient_ProviderEndpointDetection(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
endpoint string
|
||||||
|
wantR2 bool
|
||||||
|
wantB2 bool
|
||||||
|
wantDO bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "CloudflareR2",
|
||||||
|
endpoint: "https://accountid.r2.cloudflarestorage.com",
|
||||||
|
wantR2: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "CloudflareR2_HTTP",
|
||||||
|
endpoint: "http://accountid.r2.cloudflarestorage.com",
|
||||||
|
wantR2: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "BackblazeB2",
|
||||||
|
endpoint: "https://s3.us-west-002.backblazeb2.com",
|
||||||
|
wantB2: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "DigitalOcean",
|
||||||
|
endpoint: "https://sgp1.digitaloceanspaces.com",
|
||||||
|
wantDO: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "AWS_S3",
|
||||||
|
endpoint: "",
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MinIO",
|
||||||
|
endpoint: "http://localhost:9000",
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if got := litestream.IsCloudflareR2Endpoint(tt.endpoint); got != tt.wantR2 {
|
||||||
|
t.Errorf("IsCloudflareR2Endpoint() = %v, want %v", got, tt.wantR2)
|
||||||
|
}
|
||||||
|
if got := litestream.IsBackblazeEndpoint(tt.endpoint); got != tt.wantB2 {
|
||||||
|
t.Errorf("IsBackblazeEndpoint() = %v, want %v", got, tt.wantB2)
|
||||||
|
}
|
||||||
|
if got := litestream.IsDigitalOceanEndpoint(tt.endpoint); got != tt.wantDO {
|
||||||
|
t.Errorf("IsDigitalOceanEndpoint() = %v, want %v", got, tt.wantDO)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestReplicaClient_CustomEndpoint_DisablesChecksumFeatures tests that custom endpoints
|
||||||
|
// (non-AWS S3) have SDK checksum features disabled to avoid aws-chunked encoding issues.
|
||||||
|
// This addresses issues #895, #912, #940, #941, #947 where S3-compatible providers
|
||||||
|
// don't support aws-chunked encoding or streaming checksums.
|
||||||
|
func TestReplicaClient_CustomEndpoint_DisablesChecksumFeatures(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
endpoint string
|
||||||
|
wantChecksumCalc string
|
||||||
|
wantChecksumValid string
|
||||||
|
expectCustomConfig bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "AWS_S3_NoCustomConfig",
|
||||||
|
endpoint: "",
|
||||||
|
expectCustomConfig: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "R2_DisablesChecksums",
|
||||||
|
endpoint: "https://account.r2.cloudflarestorage.com",
|
||||||
|
expectCustomConfig: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "B2_DisablesChecksums",
|
||||||
|
endpoint: "https://s3.us-west-002.backblazeb2.com",
|
||||||
|
expectCustomConfig: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "MinIO_DisablesChecksums",
|
||||||
|
endpoint: "http://localhost:9000",
|
||||||
|
expectCustomConfig: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
c := NewReplicaClient()
|
||||||
|
c.Bucket = "test-bucket"
|
||||||
|
c.Region = "us-east-1"
|
||||||
|
c.Endpoint = tt.endpoint
|
||||||
|
c.ForcePathStyle = true
|
||||||
|
c.AccessKeyID = "test"
|
||||||
|
c.SecretAccessKey = "test"
|
||||||
|
|
||||||
|
hasCustomEndpoint := c.Endpoint != ""
|
||||||
|
if hasCustomEndpoint != tt.expectCustomConfig {
|
||||||
|
t.Errorf("custom endpoint detection = %v, want %v", hasCustomEndpoint, tt.expectCustomConfig)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
629
tests/integration/compatibility_test.go
Normal file
629
tests/integration/compatibility_test.go
Normal file
@@ -0,0 +1,629 @@
|
|||||||
|
package integration_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"os"
|
||||||
|
"os/exec"
|
||||||
|
"path/filepath"
|
||||||
|
"strings"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/superfly/ltx"
|
||||||
|
|
||||||
|
"github.com/benbjohnson/litestream"
|
||||||
|
"github.com/benbjohnson/litestream/file"
|
||||||
|
"github.com/benbjohnson/litestream/internal/testingutil"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestRestore_FormatConsistency tests that backups created by the current version
|
||||||
|
// can be restored by the same version. This is a basic sanity check that should
|
||||||
|
// always pass.
|
||||||
|
func TestRestore_FormatConsistency(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Create a database with test data
|
||||||
|
db, sqldb := testingutil.MustOpenDBs(t)
|
||||||
|
defer testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
// Insert initial data
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `CREATE TABLE compat_test(id INTEGER PRIMARY KEY, data TEXT);`); err != nil {
|
||||||
|
t.Fatalf("create table: %v", err)
|
||||||
|
}
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `INSERT INTO compat_test(data) VALUES(?);`, fmt.Sprintf("data-%d", i)); err != nil {
|
||||||
|
t.Fatalf("insert: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync to replica
|
||||||
|
if err := db.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("sync: %v", err)
|
||||||
|
}
|
||||||
|
if err := db.Replica.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("replica sync: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Checkpoint to ensure data is persisted
|
||||||
|
if err := db.Checkpoint(ctx, litestream.CheckpointModeTruncate); err != nil {
|
||||||
|
t.Fatalf("checkpoint: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add more data after checkpoint
|
||||||
|
for i := 100; i < 150; i++ {
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `INSERT INTO compat_test(data) VALUES(?);`, fmt.Sprintf("data-%d", i)); err != nil {
|
||||||
|
t.Fatalf("insert: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Sync again
|
||||||
|
if err := db.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("sync: %v", err)
|
||||||
|
}
|
||||||
|
if err := db.Replica.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("replica sync: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify LTX files exist
|
||||||
|
itr, err := db.Replica.Client.LTXFiles(ctx, 0, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list LTX files: %v", err)
|
||||||
|
}
|
||||||
|
var fileCount int
|
||||||
|
for itr.Next() {
|
||||||
|
fileCount++
|
||||||
|
}
|
||||||
|
if err := itr.Close(); err != nil {
|
||||||
|
t.Fatalf("close iterator: %v", err)
|
||||||
|
}
|
||||||
|
t.Logf("Created %d L0 files", fileCount)
|
||||||
|
|
||||||
|
// Restore to a new location
|
||||||
|
restorePath := filepath.Join(t.TempDir(), "restored.db")
|
||||||
|
if err := db.Replica.Restore(ctx, litestream.RestoreOptions{
|
||||||
|
OutputPath: restorePath,
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatalf("restore: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify restored data
|
||||||
|
restoredDB := testingutil.MustOpenSQLDB(t, restorePath)
|
||||||
|
defer restoredDB.Close()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `SELECT COUNT(*) FROM compat_test;`).Scan(&count); err != nil {
|
||||||
|
t.Fatalf("count: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 150 {
|
||||||
|
t.Errorf("restored row count: got %d, want 150", count)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify integrity
|
||||||
|
var integrity string
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `PRAGMA integrity_check;`).Scan(&integrity); err != nil {
|
||||||
|
t.Fatalf("integrity check: %v", err)
|
||||||
|
}
|
||||||
|
if integrity != "ok" {
|
||||||
|
t.Errorf("integrity check: %s", integrity)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRestore_MultipleSyncs tests restore after many sync cycles to ensure
|
||||||
|
// LTX file accumulation doesn't cause issues.
|
||||||
|
func TestRestore_MultipleSyncs(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
db, sqldb := testingutil.MustOpenDBs(t)
|
||||||
|
defer testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `CREATE TABLE sync_test(id INTEGER PRIMARY KEY, batch INTEGER, data BLOB);`); err != nil {
|
||||||
|
t.Fatalf("create table: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Perform multiple sync cycles
|
||||||
|
const syncCycles = 50
|
||||||
|
for batch := 0; batch < syncCycles; batch++ {
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `INSERT INTO sync_test(batch, data) VALUES(?, randomblob(500));`, batch); err != nil {
|
||||||
|
t.Fatalf("insert: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := db.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("sync %d: %v", batch, err)
|
||||||
|
}
|
||||||
|
if err := db.Replica.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("replica sync %d: %v", batch, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify LTX files
|
||||||
|
itr, err := db.Replica.Client.LTXFiles(ctx, 0, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list LTX files: %v", err)
|
||||||
|
}
|
||||||
|
var fileCount int
|
||||||
|
for itr.Next() {
|
||||||
|
fileCount++
|
||||||
|
}
|
||||||
|
if err := itr.Close(); err != nil {
|
||||||
|
t.Fatalf("close iterator: %v", err)
|
||||||
|
}
|
||||||
|
t.Logf("Created %d L0 files over %d sync cycles", fileCount, syncCycles)
|
||||||
|
|
||||||
|
// Restore
|
||||||
|
restorePath := filepath.Join(t.TempDir(), "restored.db")
|
||||||
|
if err := db.Replica.Restore(ctx, litestream.RestoreOptions{
|
||||||
|
OutputPath: restorePath,
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatalf("restore: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify
|
||||||
|
restoredDB := testingutil.MustOpenSQLDB(t, restorePath)
|
||||||
|
defer restoredDB.Close()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `SELECT COUNT(*) FROM sync_test;`).Scan(&count); err != nil {
|
||||||
|
t.Fatalf("count: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := syncCycles * 10
|
||||||
|
if count != expected {
|
||||||
|
t.Errorf("restored row count: got %d, want %d", count, expected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRestore_LTXFileValidation tests that invalid LTX files are properly
|
||||||
|
// detected and rejected during restore.
|
||||||
|
func TestRestore_LTXFileValidation(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
replicaDir := t.TempDir()
|
||||||
|
client := file.NewReplicaClient(replicaDir)
|
||||||
|
|
||||||
|
// Create a valid snapshot first
|
||||||
|
validSnapshot := createValidLTXData(t, 1, 1, time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(validSnapshot)); err != nil {
|
||||||
|
t.Fatalf("write snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
data []byte
|
||||||
|
minTXID ltx.TXID
|
||||||
|
maxTXID ltx.TXID
|
||||||
|
expectError bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "ValidL0File",
|
||||||
|
data: createValidLTXData(t, 2, 2, time.Now()),
|
||||||
|
minTXID: 2,
|
||||||
|
maxTXID: 2,
|
||||||
|
expectError: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "EmptyFile",
|
||||||
|
data: []byte{},
|
||||||
|
minTXID: 3,
|
||||||
|
maxTXID: 3,
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "TruncatedHeader",
|
||||||
|
data: []byte("truncated"),
|
||||||
|
minTXID: 4,
|
||||||
|
maxTXID: 4,
|
||||||
|
expectError: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
if _, err := client.WriteLTXFile(ctx, 0, tt.minTXID, tt.maxTXID, bytes.NewReader(tt.data)); err != nil {
|
||||||
|
t.Logf("write failed (may be expected): %v", err)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRestore_CrossPlatformPaths tests that backups work with different path styles.
|
||||||
|
func TestRestore_CrossPlatformPaths(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
pathTests := []string{
|
||||||
|
"simple",
|
||||||
|
"path/with/slashes",
|
||||||
|
"path-with-dashes",
|
||||||
|
"path_with_underscores",
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, subpath := range pathTests {
|
||||||
|
t.Run(subpath, func(t *testing.T) {
|
||||||
|
replicaDir := t.TempDir()
|
||||||
|
fullPath := filepath.Join(replicaDir, subpath)
|
||||||
|
|
||||||
|
client := file.NewReplicaClient(fullPath)
|
||||||
|
|
||||||
|
// Create snapshot
|
||||||
|
snapshot := createValidLTXData(t, 1, 1, time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("write snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create L0 files
|
||||||
|
for i := 2; i <= 5; i++ {
|
||||||
|
data := createValidLTXData(t, ltx.TXID(i), ltx.TXID(i), time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("write L0 %d: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify files exist
|
||||||
|
itr, err := client.LTXFiles(ctx, 0, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list files: %v", err)
|
||||||
|
}
|
||||||
|
var count int
|
||||||
|
for itr.Next() {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
itr.Close()
|
||||||
|
|
||||||
|
if count != 4 {
|
||||||
|
t.Errorf("file count: got %d, want 4", count)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestRestore_PointInTimeAccuracy tests that point-in-time restore respects
|
||||||
|
// timestamps correctly.
|
||||||
|
func TestRestore_PointInTimeAccuracy(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
replicaDir := t.TempDir()
|
||||||
|
client := file.NewReplicaClient(replicaDir)
|
||||||
|
|
||||||
|
baseTime := time.Now().Add(-10 * time.Minute)
|
||||||
|
|
||||||
|
// Create snapshot at baseTime
|
||||||
|
snapshot := createValidLTXData(t, 1, 1, baseTime)
|
||||||
|
if _, err := client.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("write snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create L0 files at 1-minute intervals
|
||||||
|
for i := 2; i <= 10; i++ {
|
||||||
|
ts := baseTime.Add(time.Duration(i-1) * time.Minute)
|
||||||
|
data := createValidLTXData(t, ltx.TXID(i), ltx.TXID(i), ts)
|
||||||
|
if _, err := client.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("write L0 %d: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify timestamps are preserved when listing with metadata
|
||||||
|
itr, err := client.LTXFiles(ctx, 0, 0, true)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list files: %v", err)
|
||||||
|
}
|
||||||
|
defer itr.Close()
|
||||||
|
|
||||||
|
var files []*ltx.FileInfo
|
||||||
|
for itr.Next() {
|
||||||
|
info := itr.Item()
|
||||||
|
files = append(files, <x.FileInfo{
|
||||||
|
Level: info.Level,
|
||||||
|
MinTXID: info.MinTXID,
|
||||||
|
MaxTXID: info.MaxTXID,
|
||||||
|
CreatedAt: info.CreatedAt,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(files) != 9 {
|
||||||
|
t.Fatalf("file count: got %d, want 9", len(files))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify timestamps are monotonically increasing
|
||||||
|
for i := 1; i < len(files); i++ {
|
||||||
|
if files[i].CreatedAt.Before(files[i-1].CreatedAt) {
|
||||||
|
t.Errorf("file %d timestamp (%v) is before file %d timestamp (%v)",
|
||||||
|
i, files[i].CreatedAt, i-1, files[i-1].CreatedAt)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// createValidLTXData creates a minimal valid LTX file for testing.
|
||||||
|
func createValidLTXData(t *testing.T, minTXID, maxTXID ltx.TXID, ts time.Time) []byte {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
|
hdr := ltx.Header{
|
||||||
|
Version: ltx.Version,
|
||||||
|
PageSize: 4096,
|
||||||
|
Commit: 1,
|
||||||
|
MinTXID: minTXID,
|
||||||
|
MaxTXID: maxTXID,
|
||||||
|
Timestamp: ts.UnixMilli(),
|
||||||
|
}
|
||||||
|
if minTXID == 1 {
|
||||||
|
hdr.PreApplyChecksum = 0
|
||||||
|
} else {
|
||||||
|
hdr.PreApplyChecksum = ltx.ChecksumFlag
|
||||||
|
}
|
||||||
|
|
||||||
|
headerBytes, err := hdr.MarshalBinary()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("marshal header: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return headerBytes
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestBinaryCompatibility_CLIRestore tests that the litestream CLI can restore
|
||||||
|
// backups created programmatically. This is a basic end-to-end test.
|
||||||
|
func TestBinaryCompatibility_CLIRestore(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Skip if litestream binary is not available
|
||||||
|
litestreamBin := os.Getenv("LITESTREAM_BIN")
|
||||||
|
if litestreamBin == "" {
|
||||||
|
litestreamBin = "./bin/litestream"
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(litestreamBin); os.IsNotExist(err) {
|
||||||
|
t.Skip("litestream binary not found, skipping CLI test")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Create database with programmatic API
|
||||||
|
db, sqldb := testingutil.MustOpenDBs(t)
|
||||||
|
defer testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `CREATE TABLE cli_test(id INTEGER PRIMARY KEY, value TEXT);`); err != nil {
|
||||||
|
t.Fatalf("create table: %v", err)
|
||||||
|
}
|
||||||
|
for i := 0; i < 50; i++ {
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `INSERT INTO cli_test(value) VALUES(?);`, fmt.Sprintf("value-%d", i)); err != nil {
|
||||||
|
t.Fatalf("insert: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := db.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("sync: %v", err)
|
||||||
|
}
|
||||||
|
if err := db.Replica.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("replica sync: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get replica path from the file client
|
||||||
|
fileClient, ok := db.Replica.Client.(*file.ReplicaClient)
|
||||||
|
if !ok {
|
||||||
|
t.Skip("Test requires file replica client")
|
||||||
|
}
|
||||||
|
replicaPath := fileClient.Path()
|
||||||
|
|
||||||
|
// Close the database
|
||||||
|
testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
// Restore using CLI
|
||||||
|
restorePath := filepath.Join(t.TempDir(), "cli-restored.db")
|
||||||
|
cmd := exec.CommandContext(ctx, litestreamBin, "restore", "-o", restorePath, "file://"+replicaPath)
|
||||||
|
output, err := cmd.CombinedOutput()
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("CLI restore failed: %v\nOutput: %s", err, output)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify restored database
|
||||||
|
restoredDB := testingutil.MustOpenSQLDB(t, restorePath)
|
||||||
|
defer restoredDB.Close()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `SELECT COUNT(*) FROM cli_test;`).Scan(&count); err != nil {
|
||||||
|
t.Fatalf("count: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if count != 50 {
|
||||||
|
t.Errorf("CLI restored row count: got %d, want 50", count)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestVersionMigration_DirectoryLayout tests that the current version can
|
||||||
|
// detect and handle different backup directory layouts.
|
||||||
|
func TestVersionMigration_DirectoryLayout(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
// Test current v0.5.x layout (ltx/0/, ltx/1/, ltx/snapshot/)
|
||||||
|
t.Run("CurrentLayout", func(t *testing.T) {
|
||||||
|
replicaDir := t.TempDir()
|
||||||
|
client := file.NewReplicaClient(replicaDir)
|
||||||
|
|
||||||
|
// Create files in expected layout
|
||||||
|
snapshot := createValidLTXData(t, 1, 1, time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("write snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 2; i <= 5; i++ {
|
||||||
|
data := createValidLTXData(t, ltx.TXID(i), ltx.TXID(i), time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, 0, ltx.TXID(i), ltx.TXID(i), bytes.NewReader(data)); err != nil {
|
||||||
|
t.Fatalf("write L0 %d: %v", i, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify structure
|
||||||
|
snapshotDir := filepath.Join(replicaDir, "ltx", "snapshot")
|
||||||
|
l0Dir := filepath.Join(replicaDir, "ltx", "0")
|
||||||
|
|
||||||
|
if _, err := os.Stat(snapshotDir); err != nil {
|
||||||
|
t.Errorf("snapshot directory not found: %v", err)
|
||||||
|
}
|
||||||
|
if _, err := os.Stat(l0Dir); err != nil {
|
||||||
|
t.Errorf("L0 directory not found: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify files can be listed
|
||||||
|
snapshotItr, err := client.LTXFiles(ctx, litestream.SnapshotLevel, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list snapshots: %v", err)
|
||||||
|
}
|
||||||
|
var snapshotCount int
|
||||||
|
for snapshotItr.Next() {
|
||||||
|
snapshotCount++
|
||||||
|
}
|
||||||
|
snapshotItr.Close()
|
||||||
|
|
||||||
|
l0Itr, err := client.LTXFiles(ctx, 0, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list L0: %v", err)
|
||||||
|
}
|
||||||
|
var l0Count int
|
||||||
|
for l0Itr.Next() {
|
||||||
|
l0Count++
|
||||||
|
}
|
||||||
|
l0Itr.Close()
|
||||||
|
|
||||||
|
if snapshotCount != 1 {
|
||||||
|
t.Errorf("snapshot count: got %d, want 1", snapshotCount)
|
||||||
|
}
|
||||||
|
if l0Count != 4 {
|
||||||
|
t.Errorf("L0 count: got %d, want 4", l0Count)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
// Test that old v0.3.x layout (generations/) is not accidentally used
|
||||||
|
t.Run("LegacyLayoutNotUsed", func(t *testing.T) {
|
||||||
|
replicaDir := t.TempDir()
|
||||||
|
|
||||||
|
// Create a generations/ directory (v0.3.x layout)
|
||||||
|
legacyDir := filepath.Join(replicaDir, "generations")
|
||||||
|
if err := os.MkdirAll(legacyDir, 0755); err != nil {
|
||||||
|
t.Fatalf("create legacy dir: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Create client and verify it uses new layout
|
||||||
|
client := file.NewReplicaClient(replicaDir)
|
||||||
|
|
||||||
|
snapshot := createValidLTXData(t, 1, 1, time.Now())
|
||||||
|
if _, err := client.WriteLTXFile(ctx, litestream.SnapshotLevel, 1, 1, bytes.NewReader(snapshot)); err != nil {
|
||||||
|
t.Fatalf("write snapshot: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify new layout is used
|
||||||
|
newLayoutDir := filepath.Join(replicaDir, "ltx")
|
||||||
|
if _, err := os.Stat(newLayoutDir); err != nil {
|
||||||
|
t.Errorf("new layout directory not created: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verify legacy directory is not used for new files
|
||||||
|
entries, _ := os.ReadDir(legacyDir)
|
||||||
|
if len(entries) > 0 {
|
||||||
|
t.Errorf("legacy directory should remain empty, has %d entries", len(entries))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestCompaction_Compatibility tests that compacted files maintain compatibility
|
||||||
|
// with restore operations.
|
||||||
|
func TestCompaction_Compatibility(t *testing.T) {
|
||||||
|
if testing.Short() {
|
||||||
|
t.Skip("Skipping in short mode")
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx := context.Background()
|
||||||
|
|
||||||
|
db, sqldb := testingutil.MustOpenDBs(t)
|
||||||
|
defer testingutil.MustCloseDBs(t, db, sqldb)
|
||||||
|
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `CREATE TABLE compact_test(id INTEGER PRIMARY KEY, data BLOB);`); err != nil {
|
||||||
|
t.Fatalf("create table: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate many syncs to create L0 files
|
||||||
|
for batch := 0; batch < 20; batch++ {
|
||||||
|
for i := 0; i < 5; i++ {
|
||||||
|
if _, err := sqldb.ExecContext(ctx, `INSERT INTO compact_test(data) VALUES(randomblob(1000));`); err != nil {
|
||||||
|
t.Fatalf("insert: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if err := db.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("sync: %v", err)
|
||||||
|
}
|
||||||
|
if err := db.Replica.Sync(ctx); err != nil {
|
||||||
|
t.Fatalf("replica sync: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Force compaction to level 1
|
||||||
|
if _, err := db.Compact(ctx, 1); err != nil {
|
||||||
|
t.Logf("compact to L1 (may not have enough files): %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count files at different levels
|
||||||
|
for level := 0; level <= 2; level++ {
|
||||||
|
itr, err := db.Replica.Client.LTXFiles(ctx, level, 0, false)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("list level %d: %v", level, err)
|
||||||
|
}
|
||||||
|
var count int
|
||||||
|
for itr.Next() {
|
||||||
|
count++
|
||||||
|
}
|
||||||
|
itr.Close()
|
||||||
|
t.Logf("Level %d: %d files", level, count)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Restore and verify
|
||||||
|
restorePath := filepath.Join(t.TempDir(), "compacted-restore.db")
|
||||||
|
if err := db.Replica.Restore(ctx, litestream.RestoreOptions{
|
||||||
|
OutputPath: restorePath,
|
||||||
|
}); err != nil {
|
||||||
|
t.Fatalf("restore: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
restoredDB := testingutil.MustOpenSQLDB(t, restorePath)
|
||||||
|
defer restoredDB.Close()
|
||||||
|
|
||||||
|
var count int
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `SELECT COUNT(*) FROM compact_test;`).Scan(&count); err != nil {
|
||||||
|
t.Fatalf("count: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected := 20 * 5
|
||||||
|
if count != expected {
|
||||||
|
t.Errorf("restored row count: got %d, want %d", count, expected)
|
||||||
|
}
|
||||||
|
|
||||||
|
var integrity string
|
||||||
|
if err := restoredDB.QueryRowContext(ctx, `PRAGMA integrity_check;`).Scan(&integrity); err != nil {
|
||||||
|
t.Fatalf("integrity check: %v", err)
|
||||||
|
}
|
||||||
|
if !strings.Contains(integrity, "ok") {
|
||||||
|
t.Errorf("integrity check failed: %s", integrity)
|
||||||
|
}
|
||||||
|
}
|
||||||
Reference in New Issue
Block a user