Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
43 changes: 39 additions & 4 deletions Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

1 change: 1 addition & 0 deletions Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -116,6 +116,7 @@ crypto-bigint = { version = "=0.6.1", features = ["serde", "rand_core", "extra-s
ctor = "=0.4.2" # Constructor functions - HIGH RISK: Individual maintainer (mmastrac), test-only dependency
dashmap = "=6.1.0" # Concurrent hashmap - HIGH RISK: Individual maintainer (xacrimon), despite 156M+ downloads
derive_more = { version = "=2.0.1", features = ["display"] } # Derive macros for common traits - HIGH RISK: Individual maintainer (JelteF), despite 180M+ downloads
dhat = "=0.3.3" # Heap profiling - MEDIUM RISK: Individual maintainer (Nicholas Nethercote / nnethercote), useful for memory debugging, dev-only
enum_dispatch = "=0.3.13" # Enum dispatch optimization - HIGH RISK: Individual maintainer (Anton Lazarev), despite 29M+ downloads
futures = "=0.3.31" # Async futures - LOW RISK: rust-lang team
futures-util = "=0.3.31" # Futures utilities - LOW RISK: rust-lang team
Expand Down
5 changes: 5 additions & 0 deletions core/service/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@ cfg-if.workspace = true
ciborium.workspace = true
clap = { workspace = true, features = ["derive"] }
console_error_panic_hook.workspace = true
dhat = { workspace = true, optional = true }
enum_dispatch.workspace = true
futures-util.workspace = true
hex.workspace = true
Expand Down Expand Up @@ -183,3 +184,7 @@ insecure = [
"threshold-fhe/testing",
"dep:nsm-nitro-enclave-utils"
]
# Memory profiling feature for debugging memory consumption issues
# Enable with: cargo build --features memory-profiling
# Produces dhat-heap.json file that can be viewed with https://nnethercote.github.io/dh_view/dh_view.html
memory-profiling = ["dep:dhat"]
11 changes: 11 additions & 0 deletions core/service/src/bin/kms-server.rs
Original file line number Diff line number Diff line change
Expand Up @@ -331,7 +331,18 @@ async fn build_tls_config(
Ok((server_config, client_config, verifier))
}

// Memory profiling with dhat (enable with --features memory-profiling)
// View results at: https://nnethercote.github.io/dh_view/dh_view.html
// Swapping in dhat's allocator lets it intercept every heap allocation;
// the cfg gate compiles this out entirely when the feature is disabled,
// so release builds pay no overhead.
#[cfg(feature = "memory-profiling")]
#[global_allocator]
static ALLOC: dhat::Alloc = dhat::Alloc;

fn main() -> anyhow::Result<()> {
// Initialize dhat profiler when memory-profiling feature is enabled
// This will produce a dhat-heap.json file on program exit
#[cfg(feature = "memory-profiling")]
let _profiler = dhat::Profiler::new_heap();

let args = KmsArgs::parse();
// NOTE: this config is only needed to set up the tokio runtime
// we read it again in [main_exec] to set up the rest of the server
Expand Down
38 changes: 5 additions & 33 deletions core/service/src/engine/threshold/service/key_generator.rs
Original file line number Diff line number Diff line change
Expand Up @@ -53,10 +53,7 @@ use crate::{
BaseKmsStruct, KeyGenMetadata, DSEP_PUBDATA_KEY,
},
keyset_configuration::InternalKeySetConfig,
threshold::{
service::{session::ImmutableSessionMaker, ThresholdFheKeys},
traits::KeyGenerator,
},
threshold::{service::session::ImmutableSessionMaker, traits::KeyGenerator},
utils::MetricedError,
validation::{
parse_optional_proto_request_id, parse_proto_request_id, RequestIdParsingErr,
Expand Down Expand Up @@ -1092,39 +1089,14 @@ impl<
}
};

let (integer_server_key, decompression_key, sns_key) = {
let (
raw_server_key,
_raw_ksk_material,
_raw_compression_key,
raw_decompression_key,
raw_noise_squashing_key,
_raw_noise_squashing_compression_key,
_raw_rerandomization_key,
_raw_tag,
) = pub_key_set.server_key.clone().into_raw_parts();
(
raw_server_key,
raw_decompression_key,
raw_noise_squashing_key,
)
};

let threshold_fhe_keys = ThresholdFheKeys {
private_keys: Arc::new(private_keys),
integer_server_key: Arc::new(integer_server_key),
sns_key: sns_key.map(Arc::new),
decompression_key: decompression_key.map(Arc::new),
meta_data: info.clone(),
};

//Note: We can't easily check here whether we succeeded writing to the meta store
//thus we can't increment the error counter if it fails
// Memory optimization: Storage function now serializes server_key first,
// then consumes it to extract components. This eliminates the need to clone the large
// server_key structure (~tens of GiB with production parameters).
crypto_storage
.write_threshold_keys_with_dkg_meta_store(
req_id,
epoch_id,
threshold_fhe_keys,
private_keys,
pub_key_set,
info,
meta_store,
Expand Down
24 changes: 24 additions & 0 deletions core/service/src/engine/threshold/service/kms_impl.rs
Original file line number Diff line number Diff line change
Expand Up @@ -770,5 +770,29 @@ mod tests {

(priv_key_set, pub_key_set)
}

/// Initializes dummy keys for storage tests.
///
/// Generates a throwaway key set with the test-feature helper, keeps only
/// its public parts, and pairs them with a dummy private key set.
/// Returns `(PrivateKeySet, FhePubKeySet)` for the new memory-optimized
/// storage API, which takes the private keys and public key set separately.
pub fn init_dummy_for_storage<R: rand::Rng + rand::CryptoRng>(
    param: threshold_fhe::execution::tfhe_internals::parameters::DKGParams,
    tag: tfhe::Tag,
    rng: &mut R,
) -> (
    PrivateKeySet<{ ResiduePolyF4Z128::EXTENSION_DEGREE }>,
    FhePubKeySet,
) {
    // Only the public half of the generated key set is used; the private
    // side is replaced by a cheap dummy below.
    let generated = threshold_fhe::execution::tfhe_internals::test_feature::gen_key_set(
        param, tag, rng,
    );

    (
        PrivateKeySet::init_dummy(param),
        FhePubKeySet {
            public_key: generated.public_keys.public_key,
            server_key: generated.public_keys.server_key,
        },
    )
}
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -871,9 +871,9 @@ mod tests {

let key_id = RequestId::new_random(rng);

// make a dummy private keyset
let (threshold_fhe_keys, fhe_key_set) =
ThresholdFheKeys::init_dummy(param, key_id.into(), rng);
// make a dummy private keyset for storage
let (private_keys, fhe_key_set) =
ThresholdFheKeys::init_dummy_for_storage(param, key_id.into(), rng);

// Not a huge deal if we clone this server key since we only use small/test parameters
tfhe::set_server_key(fhe_key_set.server_key.clone());
Expand Down Expand Up @@ -912,7 +912,7 @@ mod tests {
.write_threshold_keys_with_dkg_meta_store(
&key_id,
&epoch_id,
threshold_fhe_keys,
private_keys,
fhe_key_set,
info,
Arc::clone(&key_meta_store),
Expand Down
20 changes: 4 additions & 16 deletions core/service/src/engine/threshold/service/resharer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,10 +5,7 @@ use crate::{
compute_info_standard_keygen, retrieve_parameters, BaseKmsStruct, KeyGenMetadata,
DSEP_PUBDATA_KEY,
},
threshold::{
service::{session::ImmutableSessionMaker, ThresholdFheKeys},
traits::Resharer,
},
threshold::{service::session::ImmutableSessionMaker, traits::Resharer},
utils::MetricedError,
validation::{
parse_optional_proto_request_id, parse_proto_request_id, RequestIdParsingErr,
Expand Down Expand Up @@ -570,9 +567,6 @@ impl<PubS: Storage + Send + Sync + 'static, PrivS: StorageExt + Send + Sync + 's
)
.await?;

let (integer_server_key, _, _, decompression_key, sns_key, _, _, _) =
fhe_pubkeys.server_key.clone().into_raw_parts();

// Compute all the info required for storing
// using the same IDs and domain as we should've had the
// DKG went through successfully
Expand All @@ -594,14 +588,6 @@ impl<PubS: Storage + Send + Sync + 'static, PrivS: StorageExt + Send + Sync + 's
}
};

let threshold_fhe_keys = ThresholdFheKeys {
private_keys: Arc::new(new_private_key_set),
integer_server_key: Arc::new(integer_server_key),
sns_key: sns_key.map(Arc::new),
decompression_key: decompression_key.map(Arc::new),
meta_data: info.clone(),
};

// Purge before we can overwrite, use a dummy_meta_store
// as this was meant to update the meta store of DKG upon failing
let dummy_meta_store = RwLock::new(MetaStore::<KeyGenMetadata>::new(1, 1));
Expand All @@ -618,12 +604,14 @@ impl<PubS: Storage + Send + Sync + 'static, PrivS: StorageExt + Send + Sync + 's
// HOTFIX(keygen-recovery): Note that this overwrites the private storage
// at the given key ID. It's needed as long as reshare shortcuts the
// GW, but should be fixed long term.
// Memory optimization: Storage function now handles server_key
// component extraction internally, avoiding the need to clone server_key here.
crypto_storage
.write_threshold_keys_with_reshare_meta_store(
&request_id,
&key_id_to_reshare,
&old_epoch_id,
threshold_fhe_keys,
new_private_key_set,
fhe_pubkeys,
info.clone(),
Arc::clone(&meta_store),
Expand Down
8 changes: 4 additions & 4 deletions core/service/src/engine/threshold/service/user_decryptor.rs
Original file line number Diff line number Diff line change
Expand Up @@ -763,9 +763,9 @@ mod tests {
RealUserDecryptor::init_test_dummy_decryptor(base_kms, session_maker.make_immutable())
.await;

// make a dummy private keyset
let (threshold_fhe_keys, fhe_key_set) =
ThresholdFheKeys::init_dummy(param, key_id.into(), rng);
// make a dummy private keyset for storage
let (private_keys, fhe_key_set) =
ThresholdFheKeys::init_dummy_for_storage(param, key_id.into(), rng);

// Not a huge deal if we clone this server key since we only use small/test parameters
tfhe::set_server_key(fhe_key_set.server_key.clone());
Expand Down Expand Up @@ -794,7 +794,7 @@ mod tests {
.write_threshold_keys_with_dkg_meta_store(
&key_id,
&epoch_id,
threshold_fhe_keys,
private_keys,
fhe_key_set,
info,
Arc::clone(&key_meta_store),
Expand Down
Loading
Loading