Mirror of https://github.com/rqlite/rqlite.git, synced 2026-01-25 04:16:26 +00:00

Integrate "file" backend into Auto-Backup
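For context, an auto-backup configuration that selects the new backend looks like the following. This example is adapted from the test inputs added in this commit; the "dir" value is illustrative.

    {
      "version": 1,
      "type": "file",
      "interval": "30s",
      "timestamp": true,
      "no_compress": true,
      "sub": {
        "dir": "/path/to/backup/dir",
        "name": "backup.sqlite"
      }
    }

When "timestamp" is true, each backup is written to a new <yyyymmddhhmmss>_<name> file; when false, the single file <dir>/<name> is overwritten in place.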
@@ -274,6 +274,19 @@ jobs:
       MINIO_BUCKET: minio-bucket
     resource_class: large

+  end_to_end_auto_state_file:
+    docker:
+      - image: << pipeline.parameters.primary_image >>
+    steps:
+      - checkout
+      - restore_and_save_cache
+      - run: go install ./...
+      - run:
+          command: python3 system_test/e2e/auto_state_file.py
+          environment:
+            RQLITED_PATH: /home/circleci/go/bin/rqlited
+    resource_class: large
+
   end_to_end_auto_state_gcs:
     docker:
       - image: << pipeline.parameters.primary_image >>
@@ -348,6 +361,7 @@ workflows:
       - end_to_end_autoclustering
       - end_to_end_auto_state_s3
       - end_to_end_auto_state_gcs
+      - end_to_end_auto_state_file
      - end_to_end_upgrade
       - end_to_end_cdc
       - end_to_end_extensions
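The new CI job mirrors the existing auto-state jobs. Assuming rqlited has been installed with `go install ./...`, the same test can be run locally the way the job runs it: `RQLITED_PATH=$HOME/go/bin/rqlited python3 system_test/e2e/auto_state_file.py`.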
@@ -1,4 +1,7 @@
-## v9.0.3 (unreleased)
+## v9.1.0 (September 19th 2025)
+### New features
+- [PR #2340](https://github.com/rqlite/rqlite/pull/2340): Support file-based storage for automatic backups, fixes issue [#2332](https://github.com/rqlite/rqlite/issues/2332).
+
 ### Implementation changes and bug fixes
 - [PR #2335](https://github.com/rqlite/rqlite/pull/2335): Add basic File Storage client for auto-backups.
 - [PR #2337](https://github.com/rqlite/rqlite/pull/2337): Refactor CDC outputs to pluggable Destination interface.
@@ -67,7 +67,7 @@ func NewStorageClient(data []byte) (*Config, StorageClient, error) {
 		opts := &file.Options{
 			Timestamp: cfg.Timestamp,
 		}
-		sc, err = file.NewClient(fileCfg.Dir, fileCfg.File, opts)
+		sc, err = file.NewClient(fileCfg.Dir, fileCfg.Name, opts)
 	default:
 		return nil, nil, auto.ErrUnsupportedStorageType
 	}
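For orientation, a minimal usage sketch of this dispatch path. It assumes the enclosing package is the auto-backup `backup` package (as the uploader tests below suggest) and the usual context/strings imports; the function name is hypothetical, not part of this commit:

    // Sketch only: feed a "file"-type config to NewStorageClient and
    // perform a single upload.
    func exampleFileBackup(ctx context.Context, cfgJSON []byte) error {
    	// For "type": "file", NewStorageClient returns the *file.Client
    	// constructed in the hunk above.
    	_, sc, err := NewStorageClient(cfgJSON)
    	if err != nil {
    		return err
    	}
    	// Upload streams the data to <dir>/<name>, or to a timestamped
    	// <yyyymmddhhmmss>_<name> when the timestamp option is enabled.
    	return sc.Upload(ctx, strings.NewReader("backup bytes"), "snapshot-1")
    }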
@@ -5,11 +5,13 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"strings"
 	"testing"
 	"time"

 	"github.com/rqlite/rqlite/v9/auto"
 	"github.com/rqlite/rqlite/v9/auto/aws"
+	"github.com/rqlite/rqlite/v9/auto/file"
 	"github.com/rqlite/rqlite/v9/auto/gcp"
 )
@@ -106,6 +108,8 @@ key2=TEST_VAR2`)
 func Test_NewStorageClient(t *testing.T) {
 	gcsCredsFile := mustGCSCredFile(t)
 	defer os.Remove(gcsCredsFile)
+	// Escape backslashes so the path stays Windows-compatible when embedded in JSON.
+	tempDir := strings.ReplaceAll(t.TempDir(), `\`, `\\`)

 	testCases := []struct {
 		name string
@@ -198,6 +202,56 @@ func Test_NewStorageClient(t *testing.T) {
 			expectedClient: mustNewGCSClient(t, "test_bucket", "test/path", "test_project", gcsCredsFile),
 			expectedErr:    nil,
 		},
+		{
+			name: "ValidFileConfig",
+			input: []byte(`
+			{
+				"version": 1,
+				"type": "file",
+				"no_compress": true,
+				"timestamp": true,
+				"vacuum": true,
+				"interval": "30s",
+				"sub": {
+					"dir": "` + tempDir + `",
+					"name": "backup.sqlite"
+				}
+			}`),
+			expectedCfg: &Config{
+				Version:    1,
+				Type:       "file",
+				NoCompress: true,
+				Timestamp:  true,
+				Vacuum:     true,
+				Interval:   30 * auto.Duration(time.Second),
+			},
+			expectedClient: mustNewFileClient(t, tempDir, "backup.sqlite"),
+			expectedErr:    nil,
+		},
+		{
+			name: "ValidFileConfigTimestampFalse",
+			input: []byte(`
+			{
+				"version": 1,
+				"type": "file",
+				"no_compress": false,
+				"timestamp": false,
+				"interval": "1h",
+				"sub": {
+					"dir": "` + tempDir + `",
+					"name": "backup.sqlite"
+				}
+			}`),
+			expectedCfg: &Config{
+				Version:    1,
+				Type:       "file",
+				NoCompress: false,
+				Timestamp:  false,
+				Vacuum:     false,
+				Interval:   1 * auto.Duration(time.Hour),
+			},
+			expectedClient: mustNewFileClient(t, tempDir, "backup.sqlite"),
+			expectedErr:    nil,
+		},
 		{
 			name: "InvalidVersion",
 			input: []byte(`
@@ -241,6 +295,15 @@ func Test_NewStorageClient(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			cfg, sc, err := NewStorageClient(tc.input)
+
+			// Special handling for invalid file config - error happens in NewClient, not NewStorageClient
+			if tc.name == "InvalidFileConfig_PathTraversal" {
+				if err == nil {
+					t.Fatalf("Test case %s expected an error from file client creation", tc.name)
+				}
+				return
+			}
+
 			if !errors.Is(err, tc.expectedErr) {
 				t.Fatalf("Test case %s failed, expected error %v, got %v", tc.name, tc.expectedErr, err)
 			}
@@ -257,6 +320,11 @@ func Test_NewStorageClient(t *testing.T) {
 				if !ok {
 					t.Fatalf("Test case %s failed, expected GCSClient, got %T", tc.name, sc)
 				}
+			case *file.Client:
+				_, ok := sc.(*file.Client)
+				if !ok {
+					t.Fatalf("Test case %s failed, expected file.Client, got %T", tc.name, sc)
+				}
 			default:
 				t.Fatalf("Test case %s failed, unexpected client type %T", tc.name, sc)
 			}
@@ -302,6 +370,15 @@ func mustNewGCSClient(t *testing.T, bucket, name, projectID, credentialsFile string) *gcp.GCSClient {
 	return client
 }

+func mustNewFileClient(t *testing.T, dir, filename string) *file.Client {
+	t.Helper()
+	client, err := file.NewClient(dir, filename, nil)
+	if err != nil {
+		t.Fatalf("Failed to create file client: %v", err)
+	}
+	return client
+}
+
 func mustGCSCredFile(t *testing.T) string {
 	t.Helper()
 	f, err := os.CreateTemp("", "cred-*.json")
@@ -3,12 +3,103 @@ package backup

 import (
 	"context"
 	"os"
+	"strings"
+	"sync/atomic"
 	"testing"
 	"time"

 	"github.com/rqlite/rqlite/v9/auto/file"
 )

+func Test_Uploader_FileStorage_Timestamped(t *testing.T) {
+	ResetStats()
+
+	dir := t.TempDir()
+	name := "backup.sqlite"
+	storageClient, err := file.NewClient(dir, name, &file.Options{Timestamp: true})
+	if err != nil {
+		t.Fatalf("failed to create file storage client: %s", err.Error())
+	}
+	dp := &mockDataProvider{data: "test backup data"}
+	interval := 10 * time.Millisecond
+	uploader := NewUploader(storageClient, dp, interval)
+
+	// Track last index values to force multiple uploads
+	lastIndexValue := atomic.Uint64{}
+	lastIndexValue.Store(1)
+	dp.lastIndexFn = func() (uint64, error) {
+		return lastIndexValue.Load(), nil
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+	uploader.Start(ctx, nil)
+	defer cancel()
+
+	// Wait for first upload
+	testPoll(t, func() bool {
+		md, err := storageClient.CurrentMetadata(context.Background())
+		return err == nil && md != nil
+	}, 10*time.Millisecond, time.Second)
+
+	// Add a small delay to ensure a different timestamp (filename resolution is one second)
+	time.Sleep(1100 * time.Millisecond)
+
+	// Increment last index to trigger another upload
+	lastIndexValue.Store(2)
+
+	// Wait for second upload
+	testPoll(t, func() bool {
+		files, err := os.ReadDir(dir)
+		if err != nil {
+			return false
+		}
+		timestampedFiles := 0
+		for _, file := range files {
+			if strings.HasSuffix(file.Name(), "_backup.sqlite") {
+				timestampedFiles++
+			}
+		}
+		return timestampedFiles >= 2
+	}, 50*time.Millisecond, 5*time.Second)
+
+	// Verify we have at least 2 timestamped files
+	files, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatalf("failed to read directory: %s", err.Error())
+	}
+	timestampedFiles := 0
+	for _, file := range files {
+		if strings.HasSuffix(file.Name(), "_backup.sqlite") {
+			timestampedFiles++
+		}
+	}
+	if timestampedFiles < 2 {
+		t.Fatalf("expected at least 2 timestamped files, got %d", timestampedFiles)
+	}
+
+	// Verify metadata points to the latest file
+	md, err := storageClient.CurrentMetadata(context.Background())
+	if err != nil {
+		t.Fatalf("failed to get current metadata: %s", err.Error())
+	}
+	if md == nil {
+		t.Fatal("metadata is nil")
+	}
+
+	// Verify data in latest file is correct
+	dataPath := storageClient.LatestFilePath(context.Background())
+	if dataPath == "" {
+		t.Fatal("latest file path is empty")
+	}
+	data, err := os.ReadFile(dataPath)
+	if err != nil {
+		t.Fatalf("failed to read data file: %s", err.Error())
+	}
+	if string(data) != "test backup data" {
+		t.Fatalf("data mismatch: got %q want %q", string(data), "test backup data")
+	}
+}
+
 func Test_Uploader_FileStorage(t *testing.T) {
 	ResetStats()

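Note the mechanics of this test: the uploader only performs a new upload when the data provider's last index advances, which is why the test bumps lastIndexValue from 1 to 2, and the 1.1-second sleep ensures the second upload lands in a different one-second timestamp bucket, so a second *_backup.sqlite file appears rather than an overwrite.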
@@ -7,21 +7,24 @@ import (
 	"io"
 	"os"
 	"path/filepath"
+	"strings"
 	"time"
 )

 // Config represents configuration for the file storage client.
 type Config struct {
 	Dir  string `json:"dir"`
-	File string `json:"file"`
+	Name string `json:"name"`
 }

 // Client represents a file storage client.
 type Client struct {
 	dir       string
-	file      string
+	name      string
 	metaPath  string
 	timestamp bool
+
+	now func() time.Time
 }

 // Options represents options for the file storage client.
@@ -33,15 +36,30 @@ type Options struct {
 type Metadata struct {
 	ID        string `json:"id"`
 	Timestamp int64  `json:"timestamp,omitempty"`
-	File      string `json:"file,omitempty"`
+	Name      string `json:"name,omitempty"`
 }

 // NewClient creates a new file storage client.
-func NewClient(dir, file string, opt *Options) (*Client, error) {
+func NewClient(dir, name string, opt *Options) (*Client, error) {
+	// Validate and clean paths
+	dir = filepath.Clean(dir)
+	if !filepath.IsAbs(dir) {
+		return nil, fmt.Errorf("directory path must be absolute: %s", dir)
+	}
+
+	// Validate file parameter for path traversal attacks and directory separators
+	cleanFile := filepath.Clean(name)
+	if strings.Contains(name, string(filepath.Separator)) ||
+		strings.Contains(cleanFile, "..") ||
+		filepath.IsAbs(cleanFile) ||
+		cleanFile != name {
+		return nil, fmt.Errorf("invalid file parameter: %s (must be a simple filename without path separators)", name)
+	}
+
 	// Ensure the destination directory exists and is writable
 	if err := os.MkdirAll(dir, 0755); err != nil {
 		return nil, fmt.Errorf("failed to create directory %s: %w", dir, err)
 	}

 	touchPath := filepath.Join(dir, ".touch")
 	f, err := os.OpenFile(touchPath, os.O_CREATE|os.O_WRONLY, 0644)
 	if err != nil {
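The effect of this validation, exercised by Test_NewClient_PathValidation below: simple names such as backup.sqlite, backup.zip, or data.bin are accepted, while ../backup.sqlite, /abs/path.sqlite, dir/../backup.sqlite, and dir/backup.sqlite are all rejected, so a configured backup can never escape the destination directory.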
@@ -52,7 +70,7 @@ func NewClient(dir, file string, opt *Options) (*Client, error) {

 	c := &Client{
 		dir:      dir,
-		file:     file,
+		name:     name,
 		metaPath: filepath.Join(dir, "METADATA.json"),
 	}
@@ -87,53 +105,81 @@ func (c *Client) LatestFilePath(ctx context.Context) string {
 	if md == nil {
 		return ""
 	}
-	return md.File
+	return md.Name
 }

 // String returns a string representation of the client.
 func (c *Client) String() string {
-	return fmt.Sprintf("dir:%s", c.dir)
+	return fmt.Sprintf("dir:%s, file:%s", c.dir, c.name)
 }

 // Upload uploads data from the reader to the file storage.
 func (c *Client) Upload(ctx context.Context, reader io.Reader, id string) (retErr error) {
-	finalPath := filepath.Join(c.dir, c.file)
-	tmpPath := finalPath + ".tmp"
+	filename := c.name
+	if c.timestamp {
+		if c.now == nil {
+			c.now = func() time.Time {
+				return time.Now().UTC()
+			}
+		}
+		filename = timestampedPath(filename, c.now())
+	}
+
+	finalPath := filepath.Join(c.dir, filename)
+
+	tmpFile, err := os.CreateTemp(c.dir, ".upload-*")
+	if err != nil {
+		return fmt.Errorf("failed to create temporary file in %s: %w", c.dir, err)
+	}
+	tmpPath := tmpFile.Name()
 	tmpMetaPath := c.metaPath + ".tmp"

 	// Cleanup on error
 	defer func() {
+		tmpFile.Close()
 		if retErr != nil {
 			os.Remove(tmpMetaPath)
 			os.Remove(tmpPath)
-		} else {
-			os.Rename(tmpPath, finalPath)
-			os.Rename(tmpMetaPath, c.metaPath)
 		}
 	}()

 	if err := os.Remove(c.metaPath); err != nil && !os.IsNotExist(err) {
 		return fmt.Errorf("failed to remove file %s: %w", c.metaPath, err)
 	}

-	fd, err := os.OpenFile(tmpPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644)
+	// Write data to temporary file
+	_, err = io.Copy(tmpFile, reader)
 	if err != nil {
-		return fmt.Errorf("failed to open temporary file %s: %w", tmpPath, err)
+		return fmt.Errorf("failed to write to temporary file %s: %w", tmpPath, err)
 	}
-	defer fd.Close()
-
-	_, err = io.Copy(fd, reader)
-	if err != nil {
-		return fmt.Errorf("failed to write to file %s: %w", finalPath, err)
-	}
+	if err := tmpFile.Sync(); err != nil {
+		return fmt.Errorf("failed to sync temporary file %s: %w", tmpPath, err)
+	}
+	if err := tmpFile.Close(); err != nil {
+		return fmt.Errorf("failed to close temporary file %s: %w", tmpPath, err)
+	}

-	b, err := json.Marshal(Metadata{
+	// Write metadata to temporary metadata file
+	metadata := Metadata{
 		ID:        id,
 		Timestamp: time.Now().UnixMilli(),
-		File:      finalPath,
-	})
+		Name:      finalPath,
+	}
+
+	metadataBytes, err := json.Marshal(metadata)
 	if err != nil {
 		return fmt.Errorf("failed to marshal metadata: %w", err)
 	}
-	return os.WriteFile(tmpMetaPath, b, 0644)
+
+	if err := os.WriteFile(tmpMetaPath, metadataBytes, 0644); err != nil {
+		return fmt.Errorf("failed to write temporary metadata file %s: %w", tmpMetaPath, err)
+	}
+
+	if err := os.Rename(tmpPath, finalPath); err != nil {
+		return fmt.Errorf("failed to rename temporary file %s to %s: %w", tmpPath, finalPath, err)
+	}
+
+	if err := os.Rename(tmpMetaPath, c.metaPath); err != nil {
+		os.Remove(finalPath)
+		return fmt.Errorf("failed to rename temporary metadata file %s to %s: %w", tmpMetaPath, c.metaPath, err)
+	}
+	return nil
 }

 // CurrentID returns the current ID stored in the metadata.
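The ordering above is what makes the upload crash-safe: the data file is synced and renamed into place before the metadata rename, so METADATA.json never points at a file that does not exist, and if the metadata rename fails the freshly renamed data file is removed again. In isolation, the write-sync-rename idiom looks like this generic sketch (standalone illustration, not code from this commit; the package and function names are hypothetical):

    package fileutil

    import (
    	"os"
    	"path/filepath"
    )

    // atomicWrite writes data to path such that readers observe either the
    // old contents or the new contents, never a partial file.
    func atomicWrite(path string, data []byte) error {
    	// Create the temp file in the same directory so the final rename
    	// does not cross a filesystem boundary (rename is only atomic
    	// within one filesystem).
    	tmp, err := os.CreateTemp(filepath.Dir(path), ".tmp-*")
    	if err != nil {
    		return err
    	}
    	defer os.Remove(tmp.Name()) // harmless no-op after a successful rename

    	if _, err := tmp.Write(data); err != nil {
    		tmp.Close()
    		return err
    	}
    	if err := tmp.Sync(); err != nil { // flush contents to stable storage
    		tmp.Close()
    		return err
    	}
    	if err := tmp.Close(); err != nil {
    		return err
    	}
    	return os.Rename(tmp.Name(), path) // atomic replace
    }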
@@ -148,6 +194,14 @@ func (c *Client) CurrentID(ctx context.Context) (string, error) {
 	return md.ID, nil
 }

+// timestampedPath returns a new path with the given timestamp prepended.
+// If path contains /, the timestamp is prepended to the last segment.
+func timestampedPath(path string, t time.Time) string {
+	parts := strings.Split(path, "/")
+	parts[len(parts)-1] = fmt.Sprintf("%s_%s", t.Format("20060102150405"), parts[len(parts)-1])
+	return strings.Join(parts, "/")
+}
+
 func fileExists(path string) bool {
 	_, err := os.Stat(path)
 	return err == nil
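For example, the reference layout "20060102150405" renders 2025-09-19 14:30:05 UTC as "20250919143005", so a client configured with name backup.sqlite writes 20250919143005_backup.sqlite, which is the *_backup.sqlite pattern the tests glob for.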
@@ -5,7 +5,9 @@ import (
 	"context"
 	"os"
+	"path/filepath"
+	"strings"
 	"testing"
 	"time"
 )

 func Test_NewClient(t *testing.T) {
@@ -61,7 +63,7 @@ func Test_Upload_Success(t *testing.T) {
 	if md.ID != id {
 		t.Fatalf("id mismatch: got %q want %q", md.ID, id)
 	}
-	if md.File != c.LatestFilePath(context.Background()) {
+	if md.Name != c.LatestFilePath(context.Background()) {
 		t.Fatalf("file mismatch")
 	}
 	if md.Timestamp == 0 {
@@ -88,3 +90,145 @@ func Test_Upload_RemovesExistingID(t *testing.T) {
 		t.Fatalf("id mismatch: got %q want %q", md.ID, "new")
 	}
 }
+
+func Test_Upload_Timestamp_True(t *testing.T) {
+	dir := t.TempDir()
+	c, _ := NewClient(dir, "backup.sqlite", &Options{Timestamp: true})
+
+	data1 := []byte("data1")
+	data2 := []byte("data2")
+	id1 := "v1"
+	id2 := "v2"
+
+	// Upload first file
+	if err := c.Upload(context.Background(), bytes.NewReader(data1), id1); err != nil {
+		t.Fatalf("Upload error: %v", err)
+	}
+
+	// Sleep just over one second to ensure a different timestamp
+	time.Sleep(1100 * time.Millisecond)
+
+	// Upload second file
+	if err := c.Upload(context.Background(), bytes.NewReader(data2), id2); err != nil {
+		t.Fatalf("Upload error: %v", err)
+	}
+
+	// Check that two distinct files exist
+	entries, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatalf("ReadDir error: %v", err)
+	}
+
+	dataFiles := []string{}
+	for _, entry := range entries {
+		if strings.HasSuffix(entry.Name(), "_backup.sqlite") {
+			dataFiles = append(dataFiles, entry.Name())
+		}
+	}
+
+	if len(dataFiles) != 2 {
+		t.Fatalf("expected 2 data files, got %d: %v", len(dataFiles), dataFiles)
+	}
+
+	// Check metadata points to the latest file
+	md, err := c.CurrentMetadata(context.Background())
+	if err != nil {
+		t.Fatalf("CurrentMetadata error: %v", err)
+	}
+	if md.ID != id2 {
+		t.Fatalf("id mismatch: got %q want %q", md.ID, id2)
+	}
+
+	// Verify the latest file path contains the second data
+	latestPath := c.LatestFilePath(context.Background())
+	if latestPath == "" {
+		t.Fatal("latest file path is empty")
+	}
+	gotData, err := os.ReadFile(latestPath)
+	if err != nil {
+		t.Fatalf("read latest file: %v", err)
+	}
+	if !bytes.Equal(gotData, data2) {
+		t.Fatalf("latest file data mismatch: got %q want %q", string(gotData), string(data2))
+	}
+}
+
+func Test_Upload_Timestamp_False(t *testing.T) {
+	dir := t.TempDir()
+	c, _ := NewClient(dir, "backup.sqlite", &Options{Timestamp: false})
+
+	data1 := []byte("data1")
+	data2 := []byte("data2")
+	id1 := "v1"
+	id2 := "v2"
+
+	// Upload first file
+	if err := c.Upload(context.Background(), bytes.NewReader(data1), id1); err != nil {
+		t.Fatalf("Upload error: %v", err)
+	}
+
+	// Upload second file (should overwrite)
+	if err := c.Upload(context.Background(), bytes.NewReader(data2), id2); err != nil {
+		t.Fatalf("Upload error: %v", err)
+	}
+
+	// Check that only one data file exists
+	entries, err := os.ReadDir(dir)
+	if err != nil {
+		t.Fatalf("ReadDir error: %v", err)
+	}
+
+	dataFiles := []string{}
+	for _, entry := range entries {
+		if entry.Name() == "backup.sqlite" {
+			dataFiles = append(dataFiles, entry.Name())
+		}
+	}
+
+	if len(dataFiles) != 1 {
+		t.Fatalf("expected 1 data file, got %d: %v", len(dataFiles), dataFiles)
+	}
+
+	// Verify the file contains the second data (overwritten)
+	gotData, err := os.ReadFile(filepath.Join(dir, "backup.sqlite"))
+	if err != nil {
+		t.Fatalf("read data file: %v", err)
+	}
+	if !bytes.Equal(gotData, data2) {
+		t.Fatalf("data mismatch: got %q want %q", string(gotData), string(data2))
+	}
+
+	// Check metadata points to the overwritten file
+	md, err := c.CurrentMetadata(context.Background())
+	if err != nil {
+		t.Fatalf("CurrentMetadata error: %v", err)
+	}
+	if md.ID != id2 {
+		t.Fatalf("id mismatch: got %q want %q", md.ID, id2)
+	}
+}
+
+func Test_NewClient_PathValidation(t *testing.T) {
+	dir := t.TempDir()
+
+	// Test valid file names
+	validFiles := []string{"backup.sqlite", "backup.zip", "data.bin"}
+	for _, file := range validFiles {
+		_, err := NewClient(dir, file, nil)
+		if err != nil {
+			t.Fatalf("expected valid file %q to succeed, got error: %v", file, err)
+		}
+	}
+
+	// Test invalid file names (path traversal attempts)
+	invalidFiles := []string{"../backup.sqlite", "/abs/path.sqlite", "dir/../backup.sqlite", "dir/backup.sqlite"}
+	for _, file := range invalidFiles {
+		_, err := NewClient(dir, file, nil)
+		if err == nil {
+			t.Fatalf("expected invalid file %q to fail, but it succeeded", file)
+		}
+		if !strings.Contains(err.Error(), "invalid file parameter") {
+			t.Fatalf("expected path validation error for %q, got: %v", file, err)
+		}
+	}
+}
system_test/e2e/auto_state_file.py: new executable file, 252 lines
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+
+import os
+import json
+import unittest
+import sqlite3
+import tempfile
+import shutil
+import glob
+import gzip
+
+from helpers import Node, deprovision_node, write_random_file
+
+RQLITED_PATH = os.environ['RQLITED_PATH']
+
+class TestAutoBackup_File(unittest.TestCase):
+  def test_backup_timestamp_true(self):
+    '''Test automatic backup to file with timestamp=true'''
+    node = None
+    backup_dir = None
+
+    try:
+      # Create a temporary directory for backups
+      backup_dir = tempfile.mkdtemp()
+
+      # Create the auto-backup config file with timestamp enabled
+      auto_backup_cfg = {
+        "version": 1,
+        "type": "file",
+        "interval": "1s",
+        "timestamp": True,
+        "no_compress": True,
+        "vacuum": False,
+        "sub": {
+          "dir": backup_dir,
+          "name": "backup.sqlite"
+        }
+      }
+      auto_backup_cfg_file = write_random_file(json.dumps(auto_backup_cfg))
+
+      # Create a node, enable automatic backups, and start it
+      node = Node(RQLITED_PATH, '0', auto_backup=auto_backup_cfg_file)
+      node.start()
+      node.wait_for_leader()
+
+      # Create a table and insert data
+      node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
+      node.execute('INSERT INTO foo(name) VALUES("alice")')
+
+      # Wait for first backup
+      node.wait_for_upload(1)
+      node.wait_until_uploads_idle()
+
+      # Insert more data to trigger second backup
+      node.execute('INSERT INTO foo(name) VALUES("bob")')
+      node.wait_for_upload(2)
+      node.wait_until_uploads_idle()
+
+      # Check that multiple timestamped backup files exist
+      backup_files = glob.glob(os.path.join(backup_dir, "*_backup.sqlite"))
+      self.assertGreaterEqual(len(backup_files), 2, f"Expected at least 2 backup files, found: {backup_files}")
+
+      # Verify the latest backup contains the expected data
+      latest_backup = max(backup_files, key=os.path.getctime)
+      conn = sqlite3.connect(latest_backup)
+      cursor = conn.cursor()
+      cursor.execute("SELECT * FROM foo ORDER BY id")
+      rows = cursor.fetchall()
+      conn.close()
+      self.assertEqual(len(rows), 2)
+      self.assertEqual(rows[0], (1, 'alice'))
+      self.assertEqual(rows[1], (2, 'bob'))
+
+      # Verify the oldest backup contains only the first entry
+      oldest_backup = min(backup_files, key=os.path.getctime)
+      conn = sqlite3.connect(oldest_backup)
+      cursor = conn.cursor()
+      cursor.execute("SELECT * FROM foo ORDER BY id")
+      rows = cursor.fetchall()
+      conn.close()
+      self.assertEqual(len(rows), 1)
+      self.assertEqual(rows[0], (1, 'alice'))
+
+    finally:
+      if node:
+        deprovision_node(node)
+      if 'auto_backup_cfg_file' in locals():
+        os.remove(auto_backup_cfg_file)
+      if backup_dir and os.path.exists(backup_dir):
+        shutil.rmtree(backup_dir)
+
+  def test_backup_timestamp_false(self):
+    '''Test automatic backup to file with timestamp=false (overwrite mode)'''
+    node = None
+    backup_dir = None
+
+    try:
+      # Create a temporary directory for backups
+      backup_dir = tempfile.mkdtemp()
+      backup_file = os.path.join(backup_dir, "backup.sqlite")
+
+      # Create the auto-backup config file with timestamp disabled
+      auto_backup_cfg = {
+        "version": 1,
+        "type": "file",
+        "interval": "1s",
+        "timestamp": False,
+        "no_compress": True,
+        "vacuum": False,
+        "sub": {
+          "dir": backup_dir,
+          "name": "backup.sqlite"
+        }
+      }
+      auto_backup_cfg_file = write_random_file(json.dumps(auto_backup_cfg))
+
+      # Create a node, enable automatic backups, and start it
+      node = Node(RQLITED_PATH, '0', auto_backup=auto_backup_cfg_file)
+      node.start()
+      node.wait_for_leader()
+
+      # Create a table and insert data
+      node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
+      node.execute('INSERT INTO foo(name) VALUES("alice")')
+
+      # Wait for first backup
+      node.wait_for_upload(1)
+      node.wait_until_uploads_idle()
+
+      # Check that backup file exists
+      self.assertTrue(os.path.exists(backup_file), "Backup file should exist")
+
+      # Record the initial modification time
+      initial_mtime = os.path.getmtime(backup_file)
+
+      # Insert more data to trigger second backup (should overwrite)
+      node.execute('INSERT INTO foo(name) VALUES("bob")')
+      node.wait_for_upload(2)
+      node.wait_until_uploads_idle()
+
+      # Check that only one backup file exists (overwrite mode)
+      backup_files = glob.glob(os.path.join(backup_dir, "*.sqlite"))
+      timestamped_files = [f for f in backup_files if "_backup.sqlite" in f]
+      self.assertEqual(len(timestamped_files), 0, "Should have no timestamped files")
+      self.assertEqual(len(backup_files), 1, f"Should have exactly 1 backup file, found: {backup_files}")
+      self.assertEqual(backup_files[0], backup_file)
+
+      # Verify the file was overwritten (modification time changed)
+      final_mtime = os.path.getmtime(backup_file)
+      self.assertGreater(final_mtime, initial_mtime, "Backup file should have been overwritten")
+
+      # Verify the backup contains all the data
+      conn = sqlite3.connect(backup_file)
+      cursor = conn.cursor()
+      cursor.execute("SELECT * FROM foo ORDER BY id")
+      rows = cursor.fetchall()
+      conn.close()
+
+      self.assertEqual(len(rows), 2)
+      self.assertEqual(rows[0], (1, 'alice'))
+      self.assertEqual(rows[1], (2, 'bob'))
+
+    finally:
+      if node:
+        deprovision_node(node)
+      if 'auto_backup_cfg_file' in locals():
+        os.remove(auto_backup_cfg_file)
+      if backup_dir and os.path.exists(backup_dir):
+        shutil.rmtree(backup_dir)
+
+  def test_backup_with_compression(self):
+    '''Test automatic backup to file with compression enabled'''
+    node = None
+    backup_dir = None
+
+    try:
+      # Create a temporary directory for backups
+      backup_dir = tempfile.mkdtemp()
+
+      # Create the auto-backup config file with compression
+      auto_backup_cfg = {
+        "version": 1,
+        "type": "file",
+        "interval": "1s",
+        "timestamp": True,
+        "no_compress": False,  # Enable compression
+        "vacuum": False,
+        "sub": {
+          "dir": backup_dir,
+          "name": "backup.sqlite"
+        }
+      }
+      auto_backup_cfg_file = write_random_file(json.dumps(auto_backup_cfg))
+
+      # Create a node, enable automatic backups, and start it
+      node = Node(RQLITED_PATH, '0', auto_backup=auto_backup_cfg_file)
+      node.start()
+      node.wait_for_leader()
+
+      # Create a table and insert data
+      node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
+      node.execute('INSERT INTO foo(name) VALUES("alice")')
+
+      # Wait for backup
+      node.wait_for_upload(1)
+      node.wait_until_uploads_idle()
+
+      # Check that backup files exist
+      backup_files = glob.glob(os.path.join(backup_dir, "*_backup.sqlite"))
+      self.assertGreaterEqual(len(backup_files), 1, f"Expected at least 1 backup file, found: {backup_files}")
+
+      # With compression enabled the files should be gzipped, so verify by
+      # decompressing the latest one.
+      latest_backup = max(backup_files, key=os.path.getctime)
+
+      # Try to read as a gzipped file first
+      try:
+        with gzip.open(latest_backup, 'rb') as f:
+          # If we can read it as gzip, decompress to a temp file and test
+          uncompressed_data = f.read()
+
+        # Write uncompressed data to a temporary file
+        with tempfile.NamedTemporaryFile(suffix='.sqlite', delete=False) as temp_file:
+          temp_file.write(uncompressed_data)
+          temp_sqlite_path = temp_file.name
+
+        try:
+          # Test the uncompressed SQLite data
+          conn = sqlite3.connect(temp_sqlite_path)
+          cursor = conn.cursor()
+          cursor.execute("SELECT * FROM foo")
+          rows = cursor.fetchall()
+          conn.close()
+
+          self.assertEqual(len(rows), 1)
+          self.assertEqual(rows[0], (1, 'alice'))
+        finally:
+          os.unlink(temp_sqlite_path)
+
+      except (gzip.BadGzipFile, OSError):
+        self.fail("Backup file is not properly compressed")
+
+    finally:
+      if node:
+        deprovision_node(node)
+      if 'auto_backup_cfg_file' in locals():
+        os.remove(auto_backup_cfg_file)
+      if backup_dir and os.path.exists(backup_dir):
+        shutil.rmtree(backup_dir)
+
+if __name__ == '__main__':
+  unittest.main(verbosity=2)