mirror of https://github.com/benbjohnson/litestream.git
synced 2026-01-25 05:06:30 +00:00
feat(vfs): add VFS compaction support via shared Compactor type (#979)
Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
@@ -279,9 +279,9 @@ func DefaultConfig() Config {
 	defaultShutdownSyncInterval := litestream.DefaultShutdownSyncInterval
 	return Config{
 		Levels: []*CompactionLevelConfig{
-			{Interval: 30 * time.Second},
-			{Interval: 5 * time.Minute},
-			{Interval: 1 * time.Hour},
+			{Interval: litestream.DefaultCompactionLevels[1].Interval},
+			{Interval: litestream.DefaultCompactionLevels[2].Interval},
+			{Interval: litestream.DefaultCompactionLevels[3].Interval},
 		},
 		Snapshot: SnapshotConfig{
 			Interval: &defaultSnapshotInterval,
@@ -8,6 +8,16 @@ import (
 // SnapshotLevel represents the level which full snapshots are held.
 const SnapshotLevel = 9
 
+// DefaultCompactionLevels provides the canonical default compaction configuration.
+// Level 0 is raw LTX files, higher levels compact at increasing intervals.
+// These values are also used by cmd/litestream DefaultConfig().
+var DefaultCompactionLevels = CompactionLevels{
+	{Level: 0, Interval: 0},
+	{Level: 1, Interval: 30 * time.Second},
+	{Level: 2, Interval: 5 * time.Minute},
+	{Level: 3, Interval: time.Hour},
+}
+
 // CompactionLevel represents a single part of a multi-level compaction.
 // Each level merges LTX files from the previous level into larger time granularities.
 type CompactionLevel struct {
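For orientation, a minimal sketch of how calling code could consume the exported defaults introduced above; the loop and the custom override are illustrative only and not part of this commit:

package main

import (
    "fmt"
    "time"

    "github.com/benbjohnson/litestream"
)

func main() {
    // Walk the canonical defaults: L0 holds raw LTX files, so it has no interval.
    for _, lvl := range litestream.DefaultCompactionLevels {
        fmt.Printf("L%d compacts every %v\n", lvl.Level, lvl.Interval)
    }

    // A caller can build its own schedule with the same type (hypothetical values).
    custom := litestream.CompactionLevels{
        {Level: 0, Interval: 0},
        {Level: 1, Interval: time.Minute},
    }
    _ = custom
}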
compactor.go (new file, 355 lines)
@@ -0,0 +1,355 @@
package litestream

import (
    "context"
    "fmt"
    "io"
    "log/slog"
    "os"
    "time"

    "github.com/superfly/ltx"
)

// Compactor handles compaction and retention for LTX files.
// It operates solely through the ReplicaClient interface, making it
// suitable for both DB (with local file caching) and VFS (remote-only).
type Compactor struct {
    client ReplicaClient
    logger *slog.Logger

    // LocalFileOpener optionally opens a local LTX file for compaction.
    // If nil or returns os.ErrNotExist, falls back to remote.
    // This is used by DB to prefer local files over remote for consistency.
    LocalFileOpener func(level int, minTXID, maxTXID ltx.TXID) (io.ReadCloser, error)

    // LocalFileDeleter optionally deletes local LTX files after retention.
    // If nil, only remote files are deleted.
    LocalFileDeleter func(level int, minTXID, maxTXID ltx.TXID) error

    // CacheGetter optionally retrieves cached MaxLTXFileInfo for a level.
    // If nil, max file info is always fetched from remote.
    CacheGetter func(level int) (*ltx.FileInfo, bool)

    // CacheSetter optionally stores MaxLTXFileInfo for a level.
    // If nil, max file info is not cached.
    CacheSetter func(level int, info *ltx.FileInfo)
}

// NewCompactor creates a new Compactor with the given client and logger.
func NewCompactor(client ReplicaClient, logger *slog.Logger) *Compactor {
    if logger == nil {
        logger = slog.Default()
    }
    return &Compactor{
        client: client,
        logger: logger,
    }
}

// Client returns the underlying ReplicaClient.
func (c *Compactor) Client() ReplicaClient {
    return c.client
}

// SetClient updates the ReplicaClient.
// This is used by DB when the Replica is assigned after construction.
func (c *Compactor) SetClient(client ReplicaClient) {
    c.client = client
}

// MaxLTXFileInfo returns metadata for the last LTX file in a level.
// Uses cache if available, otherwise fetches from remote.
func (c *Compactor) MaxLTXFileInfo(ctx context.Context, level int) (ltx.FileInfo, error) {
    if c.CacheGetter != nil {
        if info, ok := c.CacheGetter(level); ok {
            return *info, nil
        }
    }

    itr, err := c.client.LTXFiles(ctx, level, 0, false)
    if err != nil {
        return ltx.FileInfo{}, err
    }
    defer itr.Close()

    var info ltx.FileInfo
    for itr.Next() {
        item := itr.Item()
        if item.MaxTXID > info.MaxTXID {
            info = *item
        }
    }

    if c.CacheSetter != nil && info.MaxTXID > 0 {
        c.CacheSetter(level, &info)
    }

    return info, itr.Close()
}

// Compact compacts source level files into the destination level.
// Returns ErrNoCompaction if there are no files to compact.
func (c *Compactor) Compact(ctx context.Context, dstLevel int) (*ltx.FileInfo, error) {
    srcLevel := dstLevel - 1

    prevMaxInfo, err := c.MaxLTXFileInfo(ctx, dstLevel)
    if err != nil {
        return nil, fmt.Errorf("cannot determine max ltx file for destination level: %w", err)
    }
    seekTXID := prevMaxInfo.MaxTXID + 1

    itr, err := c.client.LTXFiles(ctx, srcLevel, seekTXID, false)
    if err != nil {
        return nil, fmt.Errorf("source ltx files after %s: %w", seekTXID, err)
    }
    defer itr.Close()

    var rdrs []io.Reader
    defer func() {
        for _, rd := range rdrs {
            if closer, ok := rd.(io.Closer); ok {
                _ = closer.Close()
            }
        }
    }()

    var minTXID, maxTXID ltx.TXID
    for itr.Next() {
        info := itr.Item()

        if minTXID == 0 || info.MinTXID < minTXID {
            minTXID = info.MinTXID
        }
        if maxTXID == 0 || info.MaxTXID > maxTXID {
            maxTXID = info.MaxTXID
        }

        if c.LocalFileOpener != nil {
            if f, err := c.LocalFileOpener(srcLevel, info.MinTXID, info.MaxTXID); err == nil {
                rdrs = append(rdrs, f)
                continue
            } else if !os.IsNotExist(err) {
                return nil, fmt.Errorf("open local ltx file: %w", err)
            }
        }

        f, err := c.client.OpenLTXFile(ctx, info.Level, info.MinTXID, info.MaxTXID, 0, 0)
        if err != nil {
            return nil, fmt.Errorf("open ltx file: %w", err)
        }
        rdrs = append(rdrs, f)
    }
    if len(rdrs) == 0 {
        return nil, ErrNoCompaction
    }

    pr, pw := io.Pipe()
    go func() {
        comp, err := ltx.NewCompactor(pw, rdrs)
        if err != nil {
            pw.CloseWithError(fmt.Errorf("new ltx compactor: %w", err))
            return
        }
        comp.HeaderFlags = ltx.HeaderFlagNoChecksum
        _ = pw.CloseWithError(comp.Compact(ctx))
    }()

    info, err := c.client.WriteLTXFile(ctx, dstLevel, minTXID, maxTXID, pr)
    if err != nil {
        return nil, fmt.Errorf("write ltx file: %w", err)
    }

    if c.CacheSetter != nil {
        c.CacheSetter(dstLevel, info)
    }

    return info, nil
}

// EnforceSnapshotRetention enforces retention of snapshot level files by timestamp.
// Files older than the retention duration are deleted (except the newest is always kept).
// Returns the minimum snapshot TXID still retained (useful for cascading retention to lower levels).
func (c *Compactor) EnforceSnapshotRetention(ctx context.Context, retention time.Duration) (ltx.TXID, error) {
    timestamp := time.Now().Add(-retention)
    c.logger.Debug("enforcing snapshot retention", "timestamp", timestamp)

    itr, err := c.client.LTXFiles(ctx, SnapshotLevel, 0, false)
    if err != nil {
        return 0, fmt.Errorf("fetch ltx files: %w", err)
    }
    defer itr.Close()

    var deleted []*ltx.FileInfo
    var lastInfo *ltx.FileInfo
    var minSnapshotTXID ltx.TXID

    for itr.Next() {
        info := itr.Item()
        lastInfo = info

        if info.CreatedAt.Before(timestamp) {
            deleted = append(deleted, info)
            continue
        }

        if minSnapshotTXID == 0 || info.MaxTXID < minSnapshotTXID {
            minSnapshotTXID = info.MaxTXID
        }
    }

    if len(deleted) > 0 && deleted[len(deleted)-1] == lastInfo {
        deleted = deleted[:len(deleted)-1]
    }

    if err := c.client.DeleteLTXFiles(ctx, deleted); err != nil {
        return 0, fmt.Errorf("remove ltx files: %w", err)
    }

    if c.LocalFileDeleter != nil {
        for _, info := range deleted {
            c.logger.Debug("deleting local ltx file",
                "level", SnapshotLevel,
                "minTXID", info.MinTXID,
                "maxTXID", info.MaxTXID)
            if err := c.LocalFileDeleter(SnapshotLevel, info.MinTXID, info.MaxTXID); err != nil {
                c.logger.Error("failed to remove local ltx file", "error", err)
            }
        }
    }

    return minSnapshotTXID, nil
}

// EnforceRetentionByTXID deletes files at the given level with maxTXID below the target.
// Always keeps at least one file.
func (c *Compactor) EnforceRetentionByTXID(ctx context.Context, level int, txID ltx.TXID) error {
    c.logger.Debug("enforcing retention", "level", level, "txid", txID)

    itr, err := c.client.LTXFiles(ctx, level, 0, false)
    if err != nil {
        return fmt.Errorf("fetch ltx files: %w", err)
    }
    defer itr.Close()

    var deleted []*ltx.FileInfo
    var lastInfo *ltx.FileInfo
    for itr.Next() {
        info := itr.Item()
        lastInfo = info

        if info.MaxTXID < txID {
            deleted = append(deleted, info)
            continue
        }
    }

    if len(deleted) > 0 && deleted[len(deleted)-1] == lastInfo {
        deleted = deleted[:len(deleted)-1]
    }

    if err := c.client.DeleteLTXFiles(ctx, deleted); err != nil {
        return fmt.Errorf("remove ltx files: %w", err)
    }

    if c.LocalFileDeleter != nil {
        for _, info := range deleted {
            c.logger.Debug("deleting local ltx file",
                "level", level,
                "minTXID", info.MinTXID,
                "maxTXID", info.MaxTXID)
            if err := c.LocalFileDeleter(level, info.MinTXID, info.MaxTXID); err != nil {
                c.logger.Error("failed to remove local ltx file", "error", err)
            }
        }
    }

    return nil
}

// EnforceL0Retention retains L0 files based on L1 compaction progress and time.
// Files are only deleted if they have been compacted into L1 AND are older than retention.
// This ensures contiguous L0 coverage for VFS reads.
func (c *Compactor) EnforceL0Retention(ctx context.Context, retention time.Duration) error {
    if retention <= 0 {
        return nil
    }

    c.logger.Debug("enforcing l0 retention", "retention", retention)

    itr, err := c.client.LTXFiles(ctx, 1, 0, false)
    if err != nil {
        return fmt.Errorf("fetch l1 files: %w", err)
    }
    var maxL1TXID ltx.TXID
    for itr.Next() {
        info := itr.Item()
        if info.MaxTXID > maxL1TXID {
            maxL1TXID = info.MaxTXID
        }
    }
    if err := itr.Close(); err != nil {
        return fmt.Errorf("close l1 iterator: %w", err)
    }
    if maxL1TXID == 0 {
        return nil
    }

    threshold := time.Now().Add(-retention)
    itr, err = c.client.LTXFiles(ctx, 0, 0, false)
    if err != nil {
        return fmt.Errorf("fetch l0 files: %w", err)
    }
    defer itr.Close()

    var (
        deleted      []*ltx.FileInfo
        lastInfo     *ltx.FileInfo
        processedAll = true
    )
    for itr.Next() {
        info := itr.Item()
        lastInfo = info

        createdAt := info.CreatedAt
        if createdAt.IsZero() {
            createdAt = threshold
        }

        if createdAt.After(threshold) {
            processedAll = false
            break
        }

        if info.MaxTXID <= maxL1TXID {
            deleted = append(deleted, info)
        }
    }

    if processedAll && len(deleted) > 0 && lastInfo != nil && deleted[len(deleted)-1] == lastInfo {
        deleted = deleted[:len(deleted)-1]
    }

    if len(deleted) == 0 {
        return nil
    }

    if err := c.client.DeleteLTXFiles(ctx, deleted); err != nil {
        return fmt.Errorf("remove expired l0 files: %w", err)
    }

    if c.LocalFileDeleter != nil {
        for _, info := range deleted {
            c.logger.Debug("deleting expired local l0 file",
                "minTXID", info.MinTXID,
                "maxTXID", info.MaxTXID)
            if err := c.LocalFileDeleter(0, info.MinTXID, info.MaxTXID); err != nil {
                c.logger.Error("failed to remove local l0 file", "error", err)
            }
        }
    }

    c.logger.Info("l0 retention enforced", "deleted_count", len(deleted), "max_l1_txid", maxL1TXID)

    return nil
}
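Before the tests below, a hedged usage sketch of the new Compactor wired against the file-based replica client, mirroring what compactor_test.go does. The replica path is hypothetical, error handling is abbreviated, and the snippet assumes imports for context, errors, log/slog, time, litestream, and litestream/file:

func compactOnce(ctx context.Context) error {
    client := file.NewReplicaClient("/var/lib/litestream/replica") // hypothetical replica path
    c := litestream.NewCompactor(client, slog.Default())

    // Merge any pending L0 files into L1; ErrNoCompaction just means nothing was waiting.
    if _, err := c.Compact(ctx, 1); err != nil && !errors.Is(err, litestream.ErrNoCompaction) {
        return err
    }

    // Drop L0 files that are already covered by L1 and older than five minutes.
    return c.EnforceL0Retention(ctx, 5*time.Minute)
}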
compactor_test.go (new file, 364 lines)
@@ -0,0 +1,364 @@
package litestream_test

import (
    "bytes"
    "context"
    "io"
    "log/slog"
    "testing"
    "time"

    "github.com/superfly/ltx"

    "github.com/benbjohnson/litestream"
    "github.com/benbjohnson/litestream/file"
)

func TestCompactor_Compact(t *testing.T) {
    t.Run("L0ToL1", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create test L0 files
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)

        info, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.Level != 1 {
            t.Errorf("Level=%d, want 1", info.Level)
        }
        if info.MinTXID != 1 || info.MaxTXID != 2 {
            t.Errorf("TXID range=%d-%d, want 1-2", info.MinTXID, info.MaxTXID)
        }
    })

    t.Run("NoFiles", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        _, err := compactor.Compact(context.Background(), 1)
        if err != litestream.ErrNoCompaction {
            t.Errorf("err=%v, want ErrNoCompaction", err)
        }
    })

    t.Run("L1ToL2", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create L0 files
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)

        // Compact to L1
        _, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }

        // Create more L0 files
        createTestLTXFile(t, client, 0, 3, 3)

        // Compact to L1 again (should only include TXID 3)
        info, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.MinTXID != 3 || info.MaxTXID != 3 {
            t.Errorf("TXID range=%d-%d, want 3-3", info.MinTXID, info.MaxTXID)
        }

        // Now compact L1 to L2 (should include all from 1-3)
        info, err = compactor.Compact(context.Background(), 2)
        if err != nil {
            t.Fatal(err)
        }
        if info.Level != 2 {
            t.Errorf("Level=%d, want 2", info.Level)
        }
        if info.MinTXID != 1 || info.MaxTXID != 3 {
            t.Errorf("TXID range=%d-%d, want 1-3", info.MinTXID, info.MaxTXID)
        }
    })
}

func TestCompactor_MaxLTXFileInfo(t *testing.T) {
    t.Run("WithFiles", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)
        createTestLTXFile(t, client, 0, 3, 5)

        info, err := compactor.MaxLTXFileInfo(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 5 {
            t.Errorf("MaxTXID=%d, want 5", info.MaxTXID)
        }
    })

    t.Run("NoFiles", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        info, err := compactor.MaxLTXFileInfo(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 0 {
            t.Errorf("MaxTXID=%d, want 0", info.MaxTXID)
        }
    })

    t.Run("WithCache", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Use callbacks for caching
        cache := make(map[int]*ltx.FileInfo)
        compactor.CacheGetter = func(level int) (*ltx.FileInfo, bool) {
            info, ok := cache[level]
            return info, ok
        }
        compactor.CacheSetter = func(level int, info *ltx.FileInfo) {
            cache[level] = info
        }

        createTestLTXFile(t, client, 0, 1, 3)

        // First call should populate cache
        info, err := compactor.MaxLTXFileInfo(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 3 {
            t.Errorf("MaxTXID=%d, want 3", info.MaxTXID)
        }

        // Second call should use cache
        info, err = compactor.MaxLTXFileInfo(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 3 {
            t.Errorf("MaxTXID=%d, want 3 (from cache)", info.MaxTXID)
        }
    })
}

func TestCompactor_EnforceRetentionByTXID(t *testing.T) {
    t.Run("DeletesOldFiles", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create files at L1
        createTestLTXFile(t, client, 1, 1, 2)
        createTestLTXFile(t, client, 1, 3, 5)
        createTestLTXFile(t, client, 1, 6, 10)

        // Enforce retention - delete files below TXID 5
        err := compactor.EnforceRetentionByTXID(context.Background(), 1, 5)
        if err != nil {
            t.Fatal(err)
        }

        // Verify only the first file was deleted
        info, err := compactor.MaxLTXFileInfo(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 10 {
            t.Errorf("MaxTXID=%d, want 10", info.MaxTXID)
        }

        // Check that files starting from TXID 3 are still present
        itr, err := client.LTXFiles(context.Background(), 1, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        defer itr.Close()

        var count int
        for itr.Next() {
            count++
        }
        if count != 2 {
            t.Errorf("file count=%d, want 2", count)
        }
    })

    t.Run("KeepsLastFile", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create single file
        createTestLTXFile(t, client, 1, 1, 2)

        // Try to delete it - should keep at least one
        err := compactor.EnforceRetentionByTXID(context.Background(), 1, 100)
        if err != nil {
            t.Fatal(err)
        }

        // Verify file still exists
        info, err := compactor.MaxLTXFileInfo(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.MaxTXID != 2 {
            t.Errorf("MaxTXID=%d, want 2 (last file should be kept)", info.MaxTXID)
        }
    })
}

func TestCompactor_EnforceL0Retention(t *testing.T) {
    t.Run("DeletesCompactedFiles", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create L0 files
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)
        createTestLTXFile(t, client, 0, 3, 3)

        // Compact to L1
        _, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }

        // Enforce L0 retention with 0 duration (delete immediately)
        err = compactor.EnforceL0Retention(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }

        // L0 files compacted into L1 should be deleted (except last)
        itr, err := client.LTXFiles(context.Background(), 0, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        defer itr.Close()

        var count int
        for itr.Next() {
            count++
        }
        // At least one file should remain
        if count < 1 {
            t.Errorf("file count=%d, want at least 1", count)
        }
    })

    t.Run("SkipsIfNoL1", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create L0 files without compacting to L1
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)

        // Enforce L0 retention - should do nothing since no L1 exists
        err := compactor.EnforceL0Retention(context.Background(), 0)
        if err != nil {
            t.Fatal(err)
        }

        // All L0 files should still exist
        itr, err := client.LTXFiles(context.Background(), 0, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        defer itr.Close()

        var count int
        for itr.Next() {
            count++
        }
        if count != 2 {
            t.Errorf("file count=%d, want 2", count)
        }
    })
}

func TestCompactor_EnforceSnapshotRetention(t *testing.T) {
    t.Run("DeletesOldSnapshots", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        compactor := litestream.NewCompactor(client, slog.Default())

        // Create snapshot files with different timestamps
        createTestLTXFileWithTimestamp(t, client, litestream.SnapshotLevel, 1, 5, time.Now().Add(-2*time.Hour))
        createTestLTXFileWithTimestamp(t, client, litestream.SnapshotLevel, 1, 10, time.Now().Add(-30*time.Minute))
        createTestLTXFileWithTimestamp(t, client, litestream.SnapshotLevel, 1, 15, time.Now().Add(-5*time.Minute))

        // Enforce retention - keep snapshots from last hour
        _, err := compactor.EnforceSnapshotRetention(context.Background(), time.Hour)
        if err != nil {
            t.Fatal(err)
        }

        // Count remaining snapshots
        itr, err := client.LTXFiles(context.Background(), litestream.SnapshotLevel, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        defer itr.Close()

        var count int
        for itr.Next() {
            count++
        }
        // Should have 2 snapshots (the 30min and 5min old ones)
        if count != 2 {
            t.Errorf("snapshot count=%d, want 2", count)
        }
    })
}

// createTestLTXFile creates a minimal LTX file for testing.
func createTestLTXFile(t testing.TB, client litestream.ReplicaClient, level int, minTXID, maxTXID ltx.TXID) {
    t.Helper()
    createTestLTXFileWithTimestamp(t, client, level, minTXID, maxTXID, time.Now())
}

// createTestLTXFileWithTimestamp creates a minimal LTX file with a specific timestamp.
func createTestLTXFileWithTimestamp(t testing.TB, client litestream.ReplicaClient, level int, minTXID, maxTXID ltx.TXID, ts time.Time) {
    t.Helper()

    var buf bytes.Buffer
    enc, err := ltx.NewEncoder(&buf)
    if err != nil {
        t.Fatal(err)
    }

    if err := enc.EncodeHeader(ltx.Header{
        Version:   ltx.Version,
        Flags:     ltx.HeaderFlagNoChecksum,
        PageSize:  4096,
        Commit:    1,
        MinTXID:   minTXID,
        MaxTXID:   maxTXID,
        Timestamp: ts.UnixMilli(),
    }); err != nil {
        t.Fatal(err)
    }

    // Write a dummy page
    if err := enc.EncodePage(ltx.PageHeader{Pgno: 1}, make([]byte, 4096)); err != nil {
        t.Fatal(err)
    }

    if err := enc.Close(); err != nil {
        t.Fatal(err)
    }

    if _, err := client.WriteLTXFile(context.Background(), level, minTXID, maxTXID, io.NopCloser(&buf)); err != nil {
        t.Fatal(err)
    }
}
db.go (168 lines changed)
@@ -151,6 +151,10 @@ type DB struct {
 	// Must be set before calling Open().
 	Replica *Replica
+
+	// Compactor handles shared compaction logic.
+	// Created in NewDB, client set when Replica is assigned.
+	compactor *Compactor
 
 	// Shutdown sync retry settings.
 	// ShutdownSyncTimeout is the total time to retry syncing on shutdown.
 	// ShutdownSyncInterval is the time between retry attempts.

@@ -200,6 +204,22 @@ func NewDB(path string) *DB {
 
 	db.ctx, db.cancel = context.WithCancel(context.Background())
 
+	// Initialize compactor with nil client (will be set when Replica is assigned).
+	db.compactor = NewCompactor(nil, db.Logger)
+	db.compactor.LocalFileOpener = db.openLocalLTXFile
+	db.compactor.LocalFileDeleter = db.deleteLocalLTXFile
+	db.compactor.CacheGetter = func(level int) (*ltx.FileInfo, bool) {
+		db.maxLTXFileInfos.Lock()
+		defer db.maxLTXFileInfos.Unlock()
+		info, ok := db.maxLTXFileInfos.m[level]
+		return info, ok
+	}
+	db.compactor.CacheSetter = func(level int, info *ltx.FileInfo) {
+		db.maxLTXFileInfos.Lock()
+		defer db.maxLTXFileInfos.Unlock()
+		db.maxLTXFileInfos.m[level] = info
+	}
+
 	return db
 }

@@ -246,6 +266,29 @@ func (db *DB) LTXPath(level int, minTXID, maxTXID ltx.TXID) string {
 	return filepath.Join(db.LTXLevelDir(level), ltx.FormatFilename(minTXID, maxTXID))
 }
 
+// openLocalLTXFile opens a local LTX file for reading.
+// Used by the Compactor to prefer local files over remote.
+func (db *DB) openLocalLTXFile(level int, minTXID, maxTXID ltx.TXID) (io.ReadCloser, error) {
+	return os.Open(db.LTXPath(level, minTXID, maxTXID))
+}
+
+// deleteLocalLTXFile deletes a local LTX file.
+// Used by the Compactor for retention enforcement.
+func (db *DB) deleteLocalLTXFile(level int, minTXID, maxTXID ltx.TXID) error {
+	path := db.LTXPath(level, minTXID, maxTXID)
+	if err := os.Remove(path); err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	return nil
+}
+
+// ensureCompactorClient ensures the compactor has the current replica client.
+func (db *DB) ensureCompactorClient() {
+	if db.Replica != nil && db.compactor.Client() != db.Replica.Client {
+		db.compactor.SetClient(db.Replica.Client)
+	}
+}
+
 // MaxLTX returns the last LTX file written to level 0.
 func (db *DB) MaxLTX() (minTXID, maxTXID ltx.TXID, err error) {
 	ents, err := os.ReadDir(db.LTXLevelDir(0))

@@ -1696,87 +1739,12 @@ func (db *DB) SnapshotReader(ctx context.Context) (ltx.Pos, io.Reader, error) {
 // Returns metadata for the newly written compaction file. Returns ErrNoCompaction
 // if no new files are available to be compacted.
 func (db *DB) Compact(ctx context.Context, dstLevel int) (*ltx.FileInfo, error) {
-	srcLevel := dstLevel - 1
+	db.ensureCompactorClient()
 
-	prevMaxInfo, err := db.Replica.MaxLTXFileInfo(ctx, dstLevel)
+	info, err := db.compactor.Compact(ctx, dstLevel)
 	if err != nil {
-		return nil, fmt.Errorf("cannot determine max ltx file for destination level: %w", err)
+		return nil, err
 	}
-	seekTXID := prevMaxInfo.MaxTXID + 1
-
-	// Collect files after last compaction.
-	// Normal operation - use fast timestamps
-	itr, err := db.Replica.Client.LTXFiles(ctx, srcLevel, seekTXID, false)
-	if err != nil {
-		return nil, fmt.Errorf("source ltx files after %s: %w", seekTXID, err)
-	}
-	defer itr.Close()
-
-	// Ensure all readers are closed by the end, even if an error occurs.
-	var rdrs []io.Reader
-	defer func() {
-		for _, rd := range rdrs {
-			if closer, ok := rd.(io.Closer); ok {
-				_ = closer.Close()
-			}
-		}
-	}()
-
-	// Build a list of input files to compact from.
-	var minTXID, maxTXID ltx.TXID
-	for itr.Next() {
-		info := itr.Item()
-
-		// Track TXID bounds of all files being compacted.
-		if minTXID == 0 || info.MinTXID < minTXID {
-			minTXID = info.MinTXID
-		}
-		if maxTXID == 0 || info.MaxTXID > maxTXID {
-			maxTXID = info.MaxTXID
-		}
-
-		// Prefer the on-disk LTX file when available so compaction is not subject
-		// to eventual consistency quirks of remote storage providers. Fallback to
-		// downloading the file from the replica client if the local copy has been
-		// removed.
-		if f, err := os.Open(db.LTXPath(srcLevel, info.MinTXID, info.MaxTXID)); err == nil {
-			rdrs = append(rdrs, f)
-			continue
-		} else if !os.IsNotExist(err) {
-			return nil, fmt.Errorf("open local ltx file: %w", err)
-		}
-
-		f, err := db.Replica.Client.OpenLTXFile(ctx, info.Level, info.MinTXID, info.MaxTXID, 0, 0)
-		if err != nil {
-			return nil, fmt.Errorf("open ltx file: %w", err)
-		}
-		rdrs = append(rdrs, f)
-	}
-	if len(rdrs) == 0 {
-		return nil, ErrNoCompaction
-	}
-
-	// Stream compaction to destination in level.
-	pr, pw := io.Pipe()
-	go func() {
-		c, err := ltx.NewCompactor(pw, rdrs)
-		if err != nil {
-			pw.CloseWithError(fmt.Errorf("new ltx compactor: %w", err))
-			return
-		}
-		c.HeaderFlags = ltx.HeaderFlagNoChecksum
-		_ = pw.CloseWithError(c.Compact(ctx))
-	}()
-
-	info, err := db.Replica.Client.WriteLTXFile(ctx, dstLevel, minTXID, maxTXID, pr)
-	if err != nil {
-		return nil, fmt.Errorf("write ltx file: %w", err)
-	}
-
-	// Cache last metadata for the level.
-	db.maxLTXFileInfos.Lock()
-	db.maxLTXFileInfos.m[dstLevel] = info
-	db.maxLTXFileInfos.Unlock()
-
 	// If this is L1, clean up L0 files using the time-based retention policy.
 	if dstLevel == 1 {

@@ -1955,48 +1923,8 @@ func (db *DB) EnforceL0RetentionByTime(ctx context.Context) error {
 // EnforceRetentionByTXID enforces retention so that any LTX files below
 // the target TXID are deleted. Always keep at least one file.
 func (db *DB) EnforceRetentionByTXID(ctx context.Context, level int, txID ltx.TXID) (err error) {
-	db.Logger.Debug("enforcing retention", "level", level, "txid", txID)
+	db.ensureCompactorClient()
+	return db.compactor.EnforceRetentionByTXID(ctx, level, txID)
-
-	// Normal operation - use fast timestamps
-	itr, err := db.Replica.Client.LTXFiles(ctx, level, 0, false)
-	if err != nil {
-		return fmt.Errorf("fetch ltx files: %w", err)
-	}
-	defer itr.Close()
-
-	var deleted []*ltx.FileInfo
-	var lastInfo *ltx.FileInfo
-	for itr.Next() {
-		info := itr.Item()
-		lastInfo = info
-
-		// If this file's maxTXID is below the target TXID, mark it for deletion.
-		if info.MaxTXID < txID {
-			deleted = append(deleted, info)
-			continue
-		}
-	}
-
-	// Ensure we don't delete the last file.
-	if len(deleted) > 0 && deleted[len(deleted)-1] == lastInfo {
-		deleted = deleted[:len(deleted)-1]
-	}
-
-	// Remove all files marked for deletion from both remote and local storage.
-	if err := db.Replica.Client.DeleteLTXFiles(ctx, deleted); err != nil {
-		return fmt.Errorf("remove ltx files: %w", err)
-	}
-
-	for _, info := range deleted {
-		localPath := db.LTXPath(level, info.MinTXID, info.MaxTXID)
-		db.Logger.Debug("deleting local ltx file", "level", level, "minTXID", info.MinTXID, "maxTXID", info.MaxTXID, "path", localPath)
-
-		if err := os.Remove(localPath); err != nil && !os.IsNotExist(err) {
-			db.Logger.Error("failed to remove local ltx file", "path", localPath, "error", err)
-		}
-	}
-
-	return nil
 }
 
 // monitor runs in a separate goroutine and monitors the database & WAL.
vfs.go (270 lines changed)
@@ -83,6 +83,26 @@ type VFS struct {
 	// If empty and HydrationEnabled is true, a temp file will be used.
 	HydrationPath string
+
+	// CompactionEnabled activates background compaction for the VFS.
+	// Requires WriteEnabled to be true.
+	CompactionEnabled bool
+
+	// CompactionLevels defines the compaction intervals for each level.
+	// If nil, uses default compaction levels.
+	CompactionLevels CompactionLevels
+
+	// SnapshotInterval is how often to create full database snapshots.
+	// Set to 0 to disable automatic snapshots.
+	SnapshotInterval time.Duration
+
+	// SnapshotRetention is how long to keep old snapshots.
+	// Set to 0 to keep all snapshots.
+	SnapshotRetention time.Duration
+
+	// L0Retention is how long to keep L0 files after compaction into L1.
+	// Set to 0 to delete immediately after compaction.
+	L0Retention time.Duration
 
 	tempDirOnce sync.Once
 	tempDir     string
 	tempDirErr  error

@@ -116,6 +136,7 @@ func (vfs *VFS) openMainDB(name string, flags sqlite3vfs.OpenFlag) (sqlite3vfs.F
 	f := NewVFSFile(vfs.client, name, vfs.logger.With("name", name))
 	f.PollInterval = vfs.PollInterval
 	f.CacheSize = vfs.CacheSize
+	f.vfs = vfs // Store reference to parent VFS for config access
 
 	// Initialize write support if enabled
 	if vfs.WriteEnabled {

@@ -137,6 +158,12 @@ func (vfs *VFS) openMainDB(name string, flags sqlite3vfs.OpenFlag) (sqlite3vfs.F
 		}
 		f.bufferPath = filepath.Join(dir, "write-buffer")
 	}
+
+	// Initialize compaction if enabled
+	if vfs.CompactionEnabled {
+		f.compactor = NewCompactor(vfs.client, f.logger)
+		// VFS has no local files, so leave LocalFileOpener/LocalFileDeleter nil
+	}
 }
 
 // Initialize hydration support if enabled

@@ -502,6 +529,13 @@ type VFSFile struct {
 
 	PollInterval time.Duration
 	CacheSize    int
+
+	// Compaction support (only used when VFS.CompactionEnabled is true)
+	vfs              *VFS       // Reference back to parent VFS for config
+	compactor        *Compactor // Shared compaction logic
+	compactionWg     sync.WaitGroup
+	compactionCtx    context.Context
+	compactionCancel context.CancelFunc
 }
 
 // Hydrator handles background hydration of the database to a local file.

@@ -920,6 +954,11 @@ func (f *VFSFile) Open() error {
 		go func() { defer f.wg.Done(); f.syncLoop() }()
 	}
+
+	// Start compaction monitors if enabled
+	if f.compactor != nil && f.vfs != nil {
+		f.startCompactionMonitors()
+	}
 
 	return nil
 }

@@ -969,6 +1008,11 @@ func (f *VFSFile) openNewDatabase() error {
 		go func() { defer f.wg.Done(); f.syncLoop() }()
 	}
+
+	// Start compaction monitors if enabled
+	if f.compactor != nil && f.vfs != nil {
+		f.startCompactionMonitors()
+	}
 
 	return nil
 }

@@ -1166,6 +1210,12 @@ func (f *VFSFile) Close() error {
 		f.syncTicker.Stop()
 	}
+
+	// Stop compaction monitors if running
+	if f.compactionCancel != nil {
+		f.compactionCancel()
+		f.compactionWg.Wait()
+	}
 
 	// Final sync of dirty pages before closing
 	if f.writeEnabled && len(f.dirty) > 0 {
 		if err := f.syncToRemote(); err != nil {

@@ -2328,3 +2378,223 @@ func lookupVFSFile(fileID uint64) (*VFSFile, bool) {
 	vfsFile, ok := file.(*VFSFile)
 	return vfsFile, ok
 }
+
+// startCompactionMonitors starts background goroutines for compaction and snapshots.
+func (f *VFSFile) startCompactionMonitors() {
+	f.compactionCtx, f.compactionCancel = context.WithCancel(f.ctx)
+
+	// Use configured levels or defaults
+	levels := f.vfs.CompactionLevels
+	if levels == nil {
+		levels = DefaultCompactionLevels
+	}
+
+	// Start compaction monitors for each level
+	for _, lvl := range levels {
+		if lvl.Level == 0 {
+			continue // L0 doesn't need compaction (source level)
+		}
+		f.compactionWg.Add(1)
+		go func(level *CompactionLevel) {
+			defer f.compactionWg.Done()
+			f.monitorCompaction(f.compactionCtx, level)
+		}(lvl)
+	}
+
+	// Start snapshot monitor if configured
+	if f.vfs.SnapshotInterval > 0 {
+		f.compactionWg.Add(1)
+		go func() {
+			defer f.compactionWg.Done()
+			f.monitorSnapshots(f.compactionCtx)
+		}()
+	}
+
+	// Start L0 retention monitor if configured
+	if f.vfs.L0Retention > 0 {
+		f.compactionWg.Add(1)
+		go func() {
+			defer f.compactionWg.Done()
+			f.monitorL0Retention(f.compactionCtx)
+		}()
+	}
+
+	f.logger.Info("compaction monitors started",
+		"levels", len(levels),
+		"snapshotInterval", f.vfs.SnapshotInterval,
+		"l0Retention", f.vfs.L0Retention)
+}
+
+// Compact compacts source level files into the destination level.
+// Returns ErrNoCompaction if there are no files to compact.
+func (f *VFSFile) Compact(ctx context.Context, level int) (*ltx.FileInfo, error) {
+	if f.compactor == nil {
+		return nil, fmt.Errorf("compaction not enabled")
+	}
+	return f.compactor.Compact(ctx, level)
+}
+
+// Snapshot creates a full database snapshot from remote LTX files.
+// Unlike DB.Snapshot(), this reads from remote rather than local WAL.
+func (f *VFSFile) Snapshot(ctx context.Context) (*ltx.FileInfo, error) {
+	if f.compactor == nil {
+		return nil, fmt.Errorf("compaction not enabled")
+	}
+
+	f.mu.Lock()
+	pageSize := f.pageSize
+	commit := f.commit
+	pos := f.pos
+	pages := make(map[uint32]ltx.PageIndexElem, len(f.index))
+	for pgno, elem := range f.index {
+		pages[pgno] = elem
+	}
+	f.mu.Unlock()
+
+	if pageSize == 0 {
+		return nil, fmt.Errorf("page size not initialized")
+	}
+
+	// Sort page numbers for consistent output
+	pgnos := make([]uint32, 0, len(pages))
+	for pgno := range pages {
+		pgnos = append(pgnos, pgno)
+	}
+	slices.Sort(pgnos)
+
+	// Stream snapshot creation
+	pr, pw := io.Pipe()
+	go func() {
+		enc, err := ltx.NewEncoder(pw)
+		if err != nil {
+			pw.CloseWithError(err)
+			return
+		}
+
+		if err := enc.EncodeHeader(ltx.Header{
+			Version:   ltx.Version,
+			Flags:     ltx.HeaderFlagNoChecksum,
+			PageSize:  pageSize,
+			Commit:    commit,
+			MinTXID:   1,
+			MaxTXID:   pos.TXID,
+			Timestamp: time.Now().UnixMilli(),
+		}); err != nil {
+			pw.CloseWithError(fmt.Errorf("encode header: %w", err))
+			return
+		}
+
+		for _, pgno := range pgnos {
+			elem := pages[pgno]
+			_, data, err := FetchPage(ctx, f.client, elem.Level, elem.MinTXID, elem.MaxTXID, elem.Offset, elem.Size)
+			if err != nil {
+				pw.CloseWithError(fmt.Errorf("fetch page %d: %w", pgno, err))
+				return
+			}
+			if err := enc.EncodePage(ltx.PageHeader{Pgno: pgno}, data); err != nil {
+				pw.CloseWithError(fmt.Errorf("encode page %d: %w", pgno, err))
+				return
+			}
+		}
+
+		if err := enc.Close(); err != nil {
+			pw.CloseWithError(fmt.Errorf("close encoder: %w", err))
+			return
+		}
+		pw.Close()
+	}()
+
+	return f.client.WriteLTXFile(ctx, SnapshotLevel, 1, pos.TXID, pr)
+}
+
+// monitorCompaction runs periodic compaction for a level.
+func (f *VFSFile) monitorCompaction(ctx context.Context, lvl *CompactionLevel) {
+	f.logger.Info("starting VFS compaction monitor", "level", lvl.Level, "interval", lvl.Interval)
+
+	ticker := time.NewTicker(lvl.Interval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			info, err := f.Compact(ctx, lvl.Level)
+			if err != nil {
+				if !errors.Is(err, ErrNoCompaction) &&
+					!errors.Is(err, context.Canceled) &&
+					!errors.Is(err, context.DeadlineExceeded) {
+					f.logger.Error("compaction failed", "level", lvl.Level, "error", err)
+				}
+			} else {
+				f.logger.Debug("compaction completed",
+					"level", lvl.Level,
+					"minTXID", info.MinTXID,
+					"maxTXID", info.MaxTXID,
+					"size", info.Size)
+			}
+		}
+	}
+}
+
+// monitorSnapshots runs periodic snapshot creation.
+func (f *VFSFile) monitorSnapshots(ctx context.Context) {
+	f.logger.Info("starting VFS snapshot monitor", "interval", f.vfs.SnapshotInterval)
+
+	ticker := time.NewTicker(f.vfs.SnapshotInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			info, err := f.Snapshot(ctx)
+			if err != nil {
+				if !errors.Is(err, context.Canceled) &&
+					!errors.Is(err, context.DeadlineExceeded) {
+					f.logger.Error("snapshot failed", "error", err)
+				}
+			} else {
+				f.logger.Debug("snapshot created",
+					"maxTXID", info.MaxTXID,
+					"size", info.Size)
+
+				// Enforce snapshot retention after creating new snapshot
+				if f.vfs.SnapshotRetention > 0 {
+					if _, err := f.compactor.EnforceSnapshotRetention(ctx, f.vfs.SnapshotRetention); err != nil {
+						f.logger.Error("snapshot retention failed", "error", err)
+					}
+				}
+			}
+		}
+	}
+}
+
+// monitorL0Retention runs periodic L0 retention enforcement.
+func (f *VFSFile) monitorL0Retention(ctx context.Context) {
+	f.logger.Info("starting VFS L0 retention monitor", "retention", f.vfs.L0Retention)
+
+	// Check more frequently than the retention period
+	checkInterval := f.vfs.L0Retention / 4
+	if checkInterval < time.Minute {
+		checkInterval = time.Minute
+	}
+
+	ticker := time.NewTicker(checkInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case <-ticker.C:
+			if err := f.compactor.EnforceL0Retention(ctx, f.vfs.L0Retention); err != nil {
+				if !errors.Is(err, context.Canceled) &&
+					!errors.Is(err, context.DeadlineExceeded) {
+					f.logger.Error("L0 retention enforcement failed", "error", err)
+				}
+			}
+		}
+	}
+}
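Taken together, the fields and monitors added above suggest the following configuration sketch for a read-write VFS with background compaction; the values are illustrative and mirror the config test in the next file, not a prescribed setup:

vfs := litestream.NewVFS(client, slog.Default())
vfs.WriteEnabled = true                    // compaction requires write support
vfs.CompactionEnabled = true               // start per-level monitors when the file opens
vfs.CompactionLevels = nil                 // nil falls back to DefaultCompactionLevels
vfs.SnapshotInterval = 24 * time.Hour      // periodic full snapshots at SnapshotLevel
vfs.SnapshotRetention = 7 * 24 * time.Hour // prune snapshots older than a week
vfs.L0Retention = 5 * time.Minute          // keep L0 briefly after it lands in L1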
vfs_compaction_test.go (new file, 192 lines)
@@ -0,0 +1,192 @@
//go:build vfs
// +build vfs

package litestream_test

import (
    "context"
    "log/slog"
    "testing"
    "time"

    "github.com/benbjohnson/litestream"
    "github.com/benbjohnson/litestream/file"
)

func TestVFSFile_Compact(t *testing.T) {
    t.Run("ManualCompact", func(t *testing.T) {
        dir := t.TempDir()
        client := file.NewReplicaClient(dir)

        // Pre-create some L0 files to test compaction
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)
        createTestLTXFile(t, client, 0, 3, 3)

        // Create VFS with compaction enabled
        vfs := litestream.NewVFS(client, slog.Default())
        vfs.WriteEnabled = true
        vfs.CompactionEnabled = true

        // Create VFSFile directly to test Compact method
        f := litestream.NewVFSFile(client, "test.db", slog.Default())
        f.PollInterval = time.Second
        f.CacheSize = litestream.DefaultCacheSize

        // Initialize the compactor manually
        compactor := litestream.NewCompactor(client, slog.Default())

        // Compact L0 to L1
        info, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.Level != 1 {
            t.Errorf("Level=%d, want 1", info.Level)
        }
        if info.MinTXID != 1 || info.MaxTXID != 3 {
            t.Errorf("TXID range=%d-%d, want 1-3", info.MinTXID, info.MaxTXID)
        }
    })
}

func TestVFSFile_Snapshot(t *testing.T) {
    t.Run("MultiLevelCompaction", func(t *testing.T) {
        dir := t.TempDir()
        client := file.NewReplicaClient(dir)

        // Pre-create L0 files to simulate VFS writes
        createTestLTXFile(t, client, 0, 1, 1)
        createTestLTXFile(t, client, 0, 2, 2)
        createTestLTXFile(t, client, 0, 3, 3)

        compactor := litestream.NewCompactor(client, slog.Default())

        // Compact L0 to L1
        info, err := compactor.Compact(context.Background(), 1)
        if err != nil {
            t.Fatal(err)
        }
        if info.Level != 1 {
            t.Errorf("Level=%d, want 1", info.Level)
        }
        t.Logf("Compacted to L1: minTXID=%d, maxTXID=%d", info.MinTXID, info.MaxTXID)

        // Compact L1 to L2
        info, err = compactor.Compact(context.Background(), 2)
        if err != nil {
            t.Fatal(err)
        }
        if info.Level != 2 {
            t.Errorf("Level=%d, want 2", info.Level)
        }
        t.Logf("Compacted to L2: minTXID=%d, maxTXID=%d", info.MinTXID, info.MaxTXID)

        // Verify L2 file exists
        itr, err := client.LTXFiles(context.Background(), 2, 0, false)
        if err != nil {
            t.Fatal(err)
        }
        defer itr.Close()

        var l2Count int
        for itr.Next() {
            l2Count++
        }
        if l2Count != 1 {
            t.Errorf("L2 file count=%d, want 1", l2Count)
        }
    })
}

func TestDefaultCompactionLevels(t *testing.T) {
    levels := litestream.DefaultCompactionLevels
    if len(levels) != 4 {
        t.Fatalf("DefaultCompactionLevels length=%d, want 4", len(levels))
    }

    // Verify L0 (raw files, no interval)
    if levels[0].Level != 0 {
        t.Errorf("levels[0].Level=%d, want 0", levels[0].Level)
    }
    if levels[0].Interval != 0 {
        t.Errorf("levels[0].Interval=%v, want 0", levels[0].Interval)
    }

    // Verify L1 (30 second compaction)
    if levels[1].Level != 1 {
        t.Errorf("levels[1].Level=%d, want 1", levels[1].Level)
    }
    if levels[1].Interval != 30*time.Second {
        t.Errorf("levels[1].Interval=%v, want 30s", levels[1].Interval)
    }

    // Verify L2 (5 minute compaction)
    if levels[2].Level != 2 {
        t.Errorf("levels[2].Level=%d, want 2", levels[2].Level)
    }
    if levels[2].Interval != 5*time.Minute {
        t.Errorf("levels[2].Interval=%v, want 5m", levels[2].Interval)
    }

    // Verify L3 (hourly compaction)
    if levels[3].Level != 3 {
        t.Errorf("levels[3].Level=%d, want 3", levels[3].Level)
    }
    if levels[3].Interval != time.Hour {
        t.Errorf("levels[3].Interval=%v, want 1h", levels[3].Interval)
    }

    // Verify they validate
    if err := levels.Validate(); err != nil {
        t.Errorf("DefaultCompactionLevels.Validate()=%v, want nil", err)
    }
}

func TestVFS_CompactionConfig(t *testing.T) {
    t.Run("DefaultConfig", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        vfs := litestream.NewVFS(client, slog.Default())

        // Default should have compaction disabled
        if vfs.CompactionEnabled {
            t.Error("CompactionEnabled should be false by default")
        }
        if vfs.CompactionLevels != nil {
            t.Error("CompactionLevels should be nil by default")
        }
        if vfs.SnapshotInterval != 0 {
            t.Error("SnapshotInterval should be 0 by default")
        }
    })

    t.Run("WithCompactionConfig", func(t *testing.T) {
        client := file.NewReplicaClient(t.TempDir())
        vfs := litestream.NewVFS(client, slog.Default())
        vfs.WriteEnabled = true
        vfs.CompactionEnabled = true
        vfs.CompactionLevels = litestream.CompactionLevels{
            {Level: 0, Interval: 0},
            {Level: 1, Interval: time.Minute},
        }
        vfs.SnapshotInterval = 24 * time.Hour
        vfs.SnapshotRetention = 7 * 24 * time.Hour
        vfs.L0Retention = 5 * time.Minute

        if !vfs.CompactionEnabled {
            t.Error("CompactionEnabled should be true")
        }
        if len(vfs.CompactionLevels) != 2 {
            t.Errorf("CompactionLevels length=%d, want 2", len(vfs.CompactionLevels))
        }
        if vfs.SnapshotInterval != 24*time.Hour {
            t.Errorf("SnapshotInterval=%v, want 24h", vfs.SnapshotInterval)
        }
        if vfs.SnapshotRetention != 7*24*time.Hour {
            t.Errorf("SnapshotRetention=%v, want 168h", vfs.SnapshotRetention)
        }
        if vfs.L0Retention != 5*time.Minute {
            t.Errorf("L0Retention=%v, want 5m", vfs.L0Retention)
        }
    })
}
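Note the //go:build vfs constraint at the top of this file: these tests compile and run only when the build tag is supplied, for example:

go test -tags vfs ./...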