refactor: make TableShardedReplication a thin wrapper around LayoutManager

Author: Yureka
Date: 2025-04-20 20:37:52 +02:00
parent c8e9c45889
commit 899292ee28
3 changed files with 30 additions and 23 deletions

View File

@@ -155,10 +155,8 @@ impl Garage {
 		let system = System::new(network_key, replication_factor, consistency_mode, &config)?;
 
 		let meta_rep_param = TableShardedReplication {
-			system: system.clone(),
-			replication_factor: replication_factor.into(),
-			write_quorum: replication_factor.write_quorum(consistency_mode),
-			read_quorum: replication_factor.read_quorum(consistency_mode),
+			layout_manager: system.layout_manager.clone(),
+			consistency_mode,
 		};
 
 		let control_rep_param = TableFullReplication {
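
The construction site above no longer bakes quorum numbers into the replication object at startup; it only hands over the LayoutManager that System already shares (the public `layout_manager` field used above) plus the requested consistency mode. A minimal sketch of what that shifts, using only calls that appear elsewhere in this commit (illustrative variable names, not actual call sites):

	// Before: quorum fixed once, from the configured replication factor.
	let startup_read_quorum = replication_factor.read_quorum(consistency_mode);

	// After: quorum derived at call time from whatever layout version is live,
	// through the LayoutManager shared with System.
	let live_read_quorum = system
		.layout_manager
		.layout()
		.read_version()
		.read_quorum(consistency_mode);
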

View File

@@ -137,6 +137,14 @@ impl LayoutVersion {
 		ReplicationFactor::new(self.replication_factor).unwrap()
 	}
 
+	pub fn read_quorum(&self, consistency_mode: ConsistencyMode) -> usize {
+		self.replication_factor().read_quorum(consistency_mode)
+	}
+
+	pub fn write_quorum(&self, consistency_mode: ConsistencyMode) -> usize {
+		self.replication_factor().write_quorum(consistency_mode)
+	}
+
 	// ===================== internal information extractors ======================
 
 	pub(crate) fn expect_get_node_capacity(&self, uuid: &Uuid) -> u64 {
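
These two new helpers simply forward to the quorum logic on ReplicationFactor. As a rough, self-contained sketch of the kind of policy such helpers typically encode (the real rules live alongside ConsistencyMode in garage_rpc and may differ in detail): a majority read quorum paired with a complementary write quorum keeps R + W > N in consistent mode, while the relaxed modes drop one or both requirements.

	// Simplified stand-ins for illustration only; not the actual Garage types.
	#[derive(Clone, Copy)]
	enum ConsistencyMode {
		Consistent,
		Degraded,
		Dangerous,
	}

	struct ReplicationFactor(usize);

	impl ReplicationFactor {
		fn read_quorum(&self, mode: ConsistencyMode) -> usize {
			match mode {
				// Majority read: together with write_quorum below, R + W > N.
				ConsistencyMode::Consistent => self.0 / 2 + 1,
				// Relaxed modes accept the first replica that answers.
				ConsistencyMode::Degraded | ConsistencyMode::Dangerous => 1,
			}
		}

		fn write_quorum(&self, mode: ConsistencyMode) -> usize {
			match mode {
				ConsistencyMode::Consistent | ConsistencyMode::Degraded => self.0 - self.0 / 2,
				ConsistencyMode::Dangerous => 1,
			}
		}
	}
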

View File

@@ -2,9 +2,10 @@ use std::sync::Arc;
 use std::time::Duration;
 
 use garage_rpc::layout::*;
-use garage_rpc::system::System;
+use garage_rpc::replication_mode::ConsistencyMode;
 use garage_util::data::*;
 
+use crate::replication::sharded::manager::LayoutManager;
 use crate::replication::*;
 
 /// Sharded replication schema:
@@ -16,13 +17,8 @@ use crate::replication::*;
 #[derive(Clone)]
 pub struct TableShardedReplication {
-	/// The membership manager of this node
-	pub system: Arc<System>,
-	/// How many time each data should be replicated
-	pub replication_factor: usize,
-	/// How many nodes to contact for a read, should be at most `replication_factor`
-	pub read_quorum: usize,
-	/// How many nodes to contact for a write, should be at most `replication_factor`
-	pub write_quorum: usize,
+	pub layout_manager: Arc<LayoutManager>,
+	pub consistency_mode: ConsistencyMode,
 }
 
 impl TableReplication for TableShardedReplication {
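
The struct is now just a handle: the only sizable member sits behind an Arc and the consistency mode is a small enum, so the #[derive(Clone)] above stays cheap and every table can keep its own copy. An illustrative construction and clone, mirroring the call site in the first file of this commit:

	let meta_rep_param = TableShardedReplication {
		layout_manager: system.layout_manager.clone(), // bumps the Arc refcount
		consistency_mode,                              // small enum, cheap to copy
	};
	let per_table_copy = meta_rep_param.clone(); // again an Arc refcount bump plus the enum
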
@@ -32,9 +28,8 @@ impl TableReplication for TableShardedReplication {
 	type WriteSets = WriteLock<Vec<Vec<Uuid>>>;
 
 	fn storage_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		let layout = self.system.cluster_layout();
 		let mut ret = vec![];
-		for version in layout.versions().iter() {
+		for version in self.layout_manager.layout().versions().iter() {
 			ret.extend(version.nodes_of(hash));
 		}
 		ret.sort();
@@ -43,31 +38,37 @@ impl TableReplication for TableShardedReplication {
 	}
 
 	fn read_nodes(&self, hash: &Hash) -> Vec<Uuid> {
-		self.system
-			.cluster_layout()
+		self.layout_manager
+			.layout()
 			.read_version()
 			.nodes_of(hash)
 			.collect()
 	}
 
 	fn read_quorum(&self) -> usize {
-		self.read_quorum
+		self.layout_manager
+			.layout()
+			.read_version()
+			.read_quorum(self.consistency_mode)
 	}
 
 	fn write_sets(&self, hash: &Hash) -> Self::WriteSets {
-		self.system
-			.layout_manager
-			.write_lock_with(|l| write_sets(l, hash))
+		self.layout_manager.write_lock_with(|l| write_sets(l, hash))
 	}
 
 	fn write_quorum(&self) -> usize {
-		self.write_quorum
+		self.layout_manager
+			.layout()
+			.current()
+			.write_quorum(self.consistency_mode)
 	}
 
 	fn partition_of(&self, hash: &Hash) -> Partition {
-		self.system.cluster_layout().current().partition_of(hash)
+		self.layout_manager.layout().current().partition_of(hash)
 	}
 
 	fn sync_partitions(&self) -> SyncPartitions {
-		let layout = self.system.cluster_layout();
+		let layout = self.layout_manager.layout();
 		let layout_version = layout.ack_map_min();
 
 		let mut partitions = layout
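
For context, a hedged caller-side sketch (not Garage's actual table code) of how the TableReplication methods above combine in a quorum read: the strategy names the candidate replicas for a hash and, separately, how many answers are required, both now derived from the live layout rather than from values fixed at startup.

	// Hypothetical helper; `TableReplication`, `Hash` and `Uuid` are the types used above.
	fn plan_quorum_read<R: TableReplication>(rep: &R, hash: &Hash) -> (Vec<Uuid>, usize) {
		let nodes = rep.read_nodes(hash); // candidate replicas for this hash
		let quorum = rep.read_quorum();   // answers needed before the read completes
		(nodes, quorum)
	}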