From e495d06e3cdc09b89b5da53fd5e003820eadb9a8 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Wed, 17 Jul 2024 14:16:36 +0200 Subject: [PATCH 1/8] keymanager/src/churp: Minor fixes --- keymanager/src/churp/handler.rs | 12 ++++++------ keymanager/src/churp/storage.rs | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index eb5f94e8251..4a915465831 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -310,7 +310,7 @@ impl Churp { /// needs to be kept secret and generated only for authorized nodes. pub fn share_distribution_switch_point( &self, - _ctx: &RpcContext, + ctx: &RpcContext, req: &QueryRequest, ) -> Result> { let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; @@ -325,8 +325,8 @@ impl Churp { return Err(Error::NotInCommittee.into()); } - self.verify_node_id(_ctx, node_id)?; - self.verify_km_enclave(_ctx, &status.policy)?; + self.verify_node_id(ctx, node_id)?; + self.verify_km_enclave(ctx, &status.policy)?; match status.suite_id { SuiteId::NistP384Sha3_384 => { @@ -377,7 +377,7 @@ impl Churp { /// for authorized nodes. pub fn bivariate_share( &self, - _ctx: &RpcContext, + ctx: &RpcContext, req: &QueryRequest, ) -> Result { let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; @@ -387,8 +387,8 @@ impl Churp { return Err(Error::NotInCommittee.into()); } - self.verify_node_id(_ctx, node_id)?; - self.verify_km_enclave(_ctx, &status.policy)?; + self.verify_node_id(ctx, node_id)?; + self.verify_km_enclave(ctx, &status.policy)?; match status.suite_id { SuiteId::NistP384Sha3_384 => { diff --git a/keymanager/src/churp/storage.rs b/keymanager/src/churp/storage.rs index e57a62d2481..cd4fff3962f 100644 --- a/keymanager/src/churp/storage.rs +++ b/keymanager/src/churp/storage.rs @@ -218,14 +218,14 @@ impl Storage { /// Creates storage key for the secret share. fn create_secret_share_storage_key(churp_id: u8) -> Vec { let mut key = SECRET_SHARE_STORAGE_KEY_PREFIX.to_vec(); - key.extend(vec![churp_id]); + key.extend(&[churp_id]); key } /// Creates storage key for the next secret share. fn create_next_secret_share_storage_key(churp_id: u8) -> Vec { let mut key = NEXT_SECRET_SHARE_STORAGE_KEY_PREFIX.to_vec(); - key.extend(vec![churp_id]); + key.extend(&[churp_id]); key } From a5101ca67b64bee2c573c1462e91b1779f4bff2f Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Thu, 18 Jul 2024 11:24:13 +0200 Subject: [PATCH 2/8] keymanager/src/churp: Add handler --- keymanager/src/churp/handler.rs | 1292 ++++++++++++++++--------------- keymanager/src/churp/methods.rs | 2 +- 2 files changed, 662 insertions(+), 632 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index 4a915465831..1e49a2847d6 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -1,6 +1,5 @@ //! CHURP handler. use std::{ - any::Any, cmp, collections::HashMap, convert::TryInto, @@ -98,94 +97,23 @@ const RUNTIME_CONTEXT_SEPARATOR: &[u8] = b" for runtime "; /// on the churp ID. const CHURP_CONTEXT_SEPARATOR: &[u8] = b" for churp "; -/// Data associated with a handoff. -struct HandoffData { - /// The epoch of the handoff. +/// Represents information about a dealer. +struct DealerInfo { + /// The epoch during which this dealer is active. epoch: EpochTime, - - /// Opaque object belonging to the handoff. - object: Arc, + /// The dealer associated with this information. 
+ dealer: Arc>, } -/// Key manager application that implements churn-robust proactive secret -/// sharing scheme (CHURP). -pub struct Churp { - /// Host node identifier. - node_id: PublicKey, - /// Key manager runtime ID. - runtime_id: Namespace, - /// Runtime identity. - identity: Arc, - /// Runtime attestation key signer. - signer: Arc, - - /// Storage handler. - storage: Storage, - /// Consensus verifier. - consensus_verifier: Arc, - /// Low-level access to the underlying Runtime Host Protocol. - protocol: Arc, - - /// Verified beacon state. - beacon_state: BeaconState, - /// Verified churp state. - churp_state: ChurpState, - /// Verified registry state. - registry_state: RegistryState, - - /// Shareholders with secret shares for the last successfully completed - /// handoff, one per scheme. - shareholders: Mutex>, - /// Dealers of bivariate shares for the next handoff, one per scheme. - dealers: Mutex>, - /// Next handoffs, limited to one per scheme. - handoffs: Mutex>, - - /// Cached verified policies. - policies: VerifiedPolicies, +/// Represents information about a handoff. +struct HandoffInfo { + /// The handoff epoch. + epoch: EpochTime, + /// The handoff associated with this information. + handoff: Arc>, } -impl Churp { - pub fn new( - node_id: PublicKey, - identity: Arc, - protocol: Arc, - consensus_verifier: Arc, - ) -> Self { - let runtime_id = protocol.get_runtime_id(); - let storage = Storage::new(Arc::new(ProtocolUntrustedLocalStorage::new( - protocol.clone(), - ))); - let signer: Arc = identity.clone(); - - let beacon_state = BeaconState::new(consensus_verifier.clone()); - let churp_state = ChurpState::new(consensus_verifier.clone()); - let registry_state = RegistryState::new(consensus_verifier.clone()); - - let shareholders = Mutex::new(HashMap::new()); - let dealers = Mutex::new(HashMap::new()); - let handoffs = Mutex::new(HashMap::new()); - - let policies = VerifiedPolicies::new(); - - Self { - identity, - signer, - node_id, - runtime_id, - protocol, - consensus_verifier, - storage, - beacon_state, - shareholders, - churp_state, - registry_state, - dealers, - handoffs, - policies, - } - } - +pub(crate) trait Handler { /// Returns the verification matrix of the shared secret bivariate /// polynomial from the last successfully completed handoff. /// @@ -213,20 +141,7 @@ impl Churp { /// NOTE: This method can be called over an insecure channel, as the matrix /// does not contain any sensitive information. However, the checksum /// of the matrix should always be verified against the consensus layer. - pub fn verification_matrix(&self, req: &QueryRequest) -> Result> { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; - let shareholder = match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.get_shareholder::(req.id, req.epoch)? - } - }; - let vm = shareholder - .verifiable_share() - .verification_matrix() - .to_bytes(); - - Ok(vm) - } + fn verification_matrix(&self, req: &QueryRequest) -> Result>; /// Returns switch point for share reduction for the calling node. /// @@ -244,108 +159,402 @@ impl Churp { /// /// WARNING: This method must be called over a secure channel as the point /// needs to be kept secret and generated only for authorized nodes. - pub fn share_reduction_switch_point( + fn share_reduction_switch_point(&self, ctx: &RpcContext, req: &QueryRequest) + -> Result>; + + /// Returns switch point for full share distribution for the calling node. 
+    ///
+    /// The point is evaluation of the proactivized shared secret bivariate
+    /// polynomial at the given x (node ID) and y value (me).
+    ///
+    /// Switch point:
+    /// ```text
+    /// Point = B(node_id, me) + \sum Q_i(node_id, me)
+    /// ```
+    /// Bivariate polynomial:
+    /// ```text
+    /// B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
+    /// ```
+    /// Proactive bivariate polynomial:
+    /// ```text
+    /// Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
+    /// ```
+    ///
+    /// WARNING: This method must be called over a secure channel as the point
+    /// needs to be kept secret and generated only for authorized nodes.
+    fn share_distribution_switch_point(
+        &self,
+        ctx: &RpcContext,
+        req: &QueryRequest,
+    ) -> Result<Vec<u8>>;
+
+    /// Returns proactive bivariate polynomial share for the calling node.
+    ///
+    /// A bivariate share is a partial evaluation of a randomly selected
+    /// bivariate polynomial at a specified x or y value (node ID). Each node
+    /// interested in joining the new committee selects a bivariate polynomial
+    /// before the next handoff and commits to it by submitting the checksum
+    /// of the corresponding verification matrix to the consensus layer.
+    /// The latter can be used to verify the received bivariate shares.
+    ///
+    /// Bivariate polynomial share:
+    /// ```text
+    /// S_i(y) = Q_i(node_id, y) (dealing phase or unchanged committee)
+    /// S_i(x) = Q_i(x, node_id) (committee changes)
+    /// ```
+    /// Proactive bivariate polynomial:
+    /// ```text
+    /// Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
+    /// ```
+    ///
+    /// WARNING: This method must be called over a secure channel as
+    /// the polynomial needs to be kept secret and generated only
+    /// for authorized nodes.
+    fn bivariate_share(
+        &self,
+        ctx: &RpcContext,
+        req: &QueryRequest,
+    ) -> Result<EncodedVerifiableSecretShare>;
+
+    /// Returns the key share for the given key ID generated by the key
+    /// derivation center.
+    ///
+    /// Key share:
+    /// ```text
+    /// KS_i = s_i * H(key_id)
+    /// ```
+    ///
+    /// WARNING: This method must be called over a secure channel as the key
+    /// share needs to be kept secret and generated only for authorized nodes.
+    fn sgx_policy_key_share(
+        &self,
+        ctx: &RpcContext,
+        req: &KeyShareRequest,
+    ) -> Result<EncodedEncryptedPoint>;
+
+    /// Prepare CHURP for participation in the given handoff of the protocol.
+    ///
+    /// Initialization randomly selects a bivariate polynomial for the given
+    /// handoff, computes the corresponding verification matrix and its
+    /// checksum, and signs the latter.
+    ///
+    /// Bivariate polynomial:
+    /// B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j
+    ///
+    /// Verification matrix:
+    /// M = [b_{i,j} * G]
+    ///
+    /// Checksum:
+    /// H = KMAC256(M, runtime ID, handoff)
+    ///
+    /// The bivariate polynomial is zero-hole in all handoffs except in the
+    /// first one (dealing phase).
+    ///
+    /// This method must be called locally.
+    fn init(&self, req: &HandoffRequest) -> Result<SignedApplicationRequest>;
+
+    /// Tries to fetch switch points for share reduction from the given nodes.
+    ///
+    /// Switch points should be obtained from (at least) t distinct nodes
+    /// belonging to the old committee, verified against verification matrix
+    /// whose checksum was published in the consensus layer, merged into
+    /// a reduced share using Lagrange interpolation and proactivized with
+    /// bivariate shares.
+    ///
+    /// Switch point:
+    /// ```text
+    /// P_i = B(node_i, me)
+    /// ```
+    /// Reduced share:
+    /// ```text
+    /// RS(x) = B(x, me)
+    /// ```
+    /// Proactive reduced share:
+    /// ```text
+    /// QR(x) = RS(x) + \sum Q_i(x, me)
+    /// ```
+    fn share_reduction(&self, req: &FetchRequest) -> Result<FetchResponse>;
+
+    /// Tries to fetch switch points for full share distribution from
+    /// the given nodes.
+    ///
+    /// Switch points should be obtained from (at least) 2t distinct nodes
+    /// belonging to the new committee, verified against the sum of the
+    /// verification matrix and the verification matrices of proactive
+    /// bivariate shares, whose checksums were published in the consensus
+    /// layer, and merged into a full share using Lagrange interpolation.
+    ///
+    /// Switch point:
+    /// ```text
+    /// P_i = B(me, node_i) + \sum Q_i(me, node_i)
+    /// ```
+    /// Full share:
+    /// ```text
+    /// FS(y) = B(me, y) + \sum Q_i(me, y) = B'(me, y)
+    /// ```
+    fn share_distribution(&self, req: &FetchRequest) -> Result<FetchResponse>;
+
+    /// Tries to fetch proactive bivariate shares from the given nodes.
+    ///
+    /// Bivariate shares should be fetched from all candidates for the new
+    /// committee, including our own, verified against verification matrices
+    /// whose checksums were published in the consensus layer, and summed
+    /// into a bivariate polynomial.
+    ///
+    /// Bivariate polynomial share:
+    /// ```text
+    /// S_i(y) = Q_i(me, y) (dealing phase or unchanged committee)
+    /// S_i(x) = Q_i(x, me) (committee changes)
+    /// ```
+    fn proactivization(&self, req: &FetchRequest) -> Result<FetchResponse>;
+
+    /// Returns a signed confirmation request containing the checksum
+    /// of the merged verification matrix.
+    fn confirmation(&self, req: &HandoffRequest) -> Result<SignedConfirmationRequest>;
+
+    /// Finalizes the specified scheme by cleaning up obsolete dealers,
+    /// handoffs, and shareholders. If the handoff was just completed,
+    /// the shareholder is made available, and its share is persisted
+    /// to the local storage.
+    fn finalize(&self, req: &HandoffRequest) -> Result<()>;
+}
+
+/// Key manager application that implements churn-robust proactive secret
+/// sharing scheme (CHURP).
+pub struct Churp {
+    /// Host node identifier.
+    node_id: PublicKey,
+    /// Key manager runtime ID.
+    runtime_id: Namespace,
+    /// Runtime identity.
+    identity: Arc<Identity>,
+    /// Low-level access to the underlying Runtime Host Protocol.
+    protocol: Arc<Protocol>,
+    /// Consensus verifier.
+    consensus_verifier: Arc<dyn Verifier>,
+    /// Verified churp state.
+    churp_state: ChurpState,
+
+    /// Cached instances.
+    instances: Mutex<HashMap<u8, Arc<dyn Handler + Send + Sync>>>,
+    /// Cached verified policies.
+ policies: Arc, +} + +impl Churp { + pub fn new( + node_id: PublicKey, + identity: Arc, + protocol: Arc, + consensus_verifier: Arc, + ) -> Self { + let runtime_id = protocol.get_runtime_id(); + let churp_state = ChurpState::new(consensus_verifier.clone()); + let instances = Mutex::new(HashMap::new()); + let policies = Arc::new(VerifiedPolicies::new()); + + Self { + node_id, + runtime_id, + identity, + protocol, + consensus_verifier, + churp_state, + instances, + policies, } + } - let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; - if !status.applications.contains_key(node_id) { - return Err(Error::NotInCommittee.into()); + fn get_instance(&self, churp_id: u8) -> Result> { + let mut instances = self.instances.lock().unwrap(); + + if let Some(instance) = instances.get(&churp_id) { + return Ok(instance.clone()); } - self.verify_node_id(ctx, node_id)?; - self.verify_km_enclave(ctx, &status.policy)?; + let status = self.churp_state.status(self.runtime_id, churp_id)?; + let instance = match status.suite_id { + SuiteId::NistP384Sha3_384 => Instance::::new( + churp_id, + self.node_id, + self.identity.clone(), + self.protocol.clone(), + self.consensus_verifier.clone(), + self.policies.clone(), + ), + }; + let instance = Arc::new(instance); + instances.insert(churp_id, instance.clone()); + + Ok(instance) + } +} + +impl Handler for Churp { + fn verification_matrix(&self, req: &QueryRequest) -> Result> { + self.get_instance(req.id)?.verification_matrix(req) + } + + fn share_reduction_switch_point( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result> { + self.get_instance(req.id)? + .share_reduction_switch_point(ctx, req) + } + + fn share_distribution_switch_point( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result> { + self.get_instance(req.id)? + .share_distribution_switch_point(ctx, req) + } + + fn bivariate_share( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result { + self.get_instance(req.id)?.bivariate_share(ctx, req) + } + + fn sgx_policy_key_share( + &self, + ctx: &RpcContext, + req: &KeyShareRequest, + ) -> Result { + self.get_instance(req.id)?.sgx_policy_key_share(ctx, req) + } + + fn init(&self, req: &HandoffRequest) -> Result { + self.get_instance(req.id)?.init(req) + } + + fn share_reduction(&self, req: &FetchRequest) -> Result { + self.get_instance(req.id)?.share_reduction(req) + } + + fn share_distribution(&self, req: &FetchRequest) -> Result { + self.get_instance(req.id)?.share_distribution(req) + } + + fn proactivization(&self, req: &FetchRequest) -> Result { + self.get_instance(req.id)?.proactivization(req) + } + + fn confirmation(&self, req: &HandoffRequest) -> Result { + self.get_instance(req.id)?.confirmation(req) + } + + fn finalize(&self, req: &HandoffRequest) -> Result<()> { + self.get_instance(req.id)?.finalize(req) + } +} + +struct Instance { + /// Host node identifier. + node_id: PublicKey, + /// Instance identifier. + churp_id: u8, + /// Key manager runtime ID. + runtime_id: Namespace, + /// Runtime identity. + identity: Arc, + /// Runtime attestation key signer. + signer: Arc, + + /// Storage handler. + storage: Storage, + /// Consensus verifier. + consensus_verifier: Arc, + /// Low-level access to the underlying Runtime Host Protocol. + protocol: Arc, + + /// Verified beacon state. + beacon_state: BeaconState, + /// Verified churp state. + churp_state: ChurpState, + /// Verified registry state. + registry_state: RegistryState, + + /// Shareholders with secret shares for completed handoffs. 
+ shareholders: Mutex>>>, + /// Dealer of bivariate shares for the next handoff. + dealer: Mutex>>, + /// Next handoff. + handoff: Mutex>>, + + /// Cached verified policies. + policies: Arc, +} + +impl Instance { + pub fn new( + churp_id: u8, + node_id: PublicKey, + identity: Arc, + protocol: Arc, + consensus_verifier: Arc, + policies: Arc, + ) -> Self { + let runtime_id = protocol.get_runtime_id(); + let storage = Storage::new(Arc::new(ProtocolUntrustedLocalStorage::new( + protocol.clone(), + ))); + let signer: Arc = identity.clone(); + + let beacon_state = BeaconState::new(consensus_verifier.clone()); + let churp_state = ChurpState::new(consensus_verifier.clone()); + let registry_state = RegistryState::new(consensus_verifier.clone()); - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.derive_share_reduction_switch_point::(node_id, &status) - } + let shareholders = Mutex::new(HashMap::new()); + let dealer = Mutex::new(None); + let handoff = Mutex::new(None); + + Self { + churp_id, + identity, + signer, + node_id, + runtime_id, + protocol, + consensus_verifier, + storage, + beacon_state, + shareholders, + churp_state, + registry_state, + dealer, + handoff, + policies, } } - fn derive_share_reduction_switch_point( + fn derive_share_reduction_switch_point( &self, node_id: &PublicKey, status: &Status, - ) -> Result> - where - S: Suite, - { + ) -> Result> { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; - let shareholder = self.get_shareholder::(status.id, status.handoff)?; + let shareholder = self.get_shareholder(status.id, status.handoff)?; let point = shareholder.switch_point(&x); let point = scalar_to_bytes(&point); Ok(point) } - /// Returns switch point for full share distribution for the calling node. - /// - /// The point is evaluation of the proactivized shared secret bivariate - /// polynomial at the given x (node ID) and y value (me). - /// - /// Switch point: - /// ```text - /// Point = B(node_id, me) + \sum Q_i(node_id, me) - /// ``` - /// Bivariate polynomial: - /// ```text - /// B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j - /// ``` - /// Proactive bivariate polynomial: - /// ```text - /// Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j - /// ``` - /// - /// WARNING: This method must be called over a secure channel as the point - /// needs to be kept secret and generated only for authorized nodes. 
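// Illustrative sketch: the switch point described above is a plain bivariate
// polynomial evaluation, Point = B(x, y). This toy version uses u128
// arithmetic modulo a small prime in place of the suite's scalar field;
// the coefficient layout and sizes are assumptions made for illustration only.
fn eval_bivariate(coeffs: &[Vec<u128>], x: u128, y: u128, p: u128) -> u128 {
    // B(x, y) = \sum_{i,j} b_{i,j} x^i y^j (mod p); coeffs[i][j] = b_{i,j}.
    let mut sum = 0;
    for (i, row) in coeffs.iter().enumerate() {
        for (j, &b) in row.iter().enumerate() {
            let term = b % p * (x.pow(i as u32) % p) % p * (y.pow(j as u32) % p) % p;
            sum = (sum + term) % p;
        }
    }
    sum
}
// For B(x,y) = 1 + 2x + 3y + 4xy over GF(97):
// eval_bivariate(&[vec![1, 3], vec![2, 4]], 5, 7, 97) == 75.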
- pub fn share_distribution_switch_point( - &self, - ctx: &RpcContext, - req: &QueryRequest, - ) -> Result> { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - let kind = Self::handoff_kind(&status); - if !matches!(kind, HandoffKind::CommitteeChanged) { - return Err(Error::InvalidHandoff.into()); - } - - let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; - if !status.applications.contains_key(node_id) { - return Err(Error::NotInCommittee.into()); - } - - self.verify_node_id(ctx, node_id)?; - self.verify_km_enclave(ctx, &status.policy)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.derive_share_distribution_point::(node_id, &status) - } - } - } - - fn derive_share_distribution_point( + fn derive_share_distribution_point( &self, node_id: &PublicKey, status: &Status, - ) -> Result> - where - S: Suite + 'static, - { + ) -> Result> { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; - let handoff = self.get_handoff::(status.id, status.next_handoff)?; + let handoff = self.get_handoff(status.next_handoff)?; let shareholder = handoff.get_reduced_shareholder()?; let point = shareholder.switch_point(&x); let point = scalar_to_bytes(&point); @@ -353,62 +562,15 @@ impl Churp { Ok(point) } - /// Returns proactive bivariate polynomial share for the calling node. - /// - /// A bivariate share is a partial evaluation of a randomly selected - /// bivariate polynomial at a specified x or y value (node ID). Each node - /// interested in joining the new committee selects a bivariate polynomial - /// before the next handoff and commits to it by submitting the checksum - /// of the corresponding verification matrix to the consensus layer. - /// The latter can be used to verify the received bivariate shares. - /// - /// Bivariate polynomial share: - /// ```text - /// S_i(y) = Q_i(node_id, y) (dealing phase or unchanged committee) - /// S_i(x) = Q_i(x, node_id) (committee changes) - /// ``` - /// Proactive bivariate polynomial: - /// ```text - /// Q_i(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j - /// ``` - /// - /// WARNING: This method must be called over a secure channel as - /// the polynomial needs to be kept secret and generated only - /// for authorized nodes. 
- pub fn bivariate_share( - &self, - ctx: &RpcContext, - req: &QueryRequest, - ) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; - if !status.applications.contains_key(node_id) { - return Err(Error::NotInCommittee.into()); - } - - self.verify_node_id(ctx, node_id)?; - self.verify_km_enclave(ctx, &status.policy)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.derive_bivariate_share::(node_id, &status) - } - } - } - - fn derive_bivariate_share( + fn derive_bivariate_share( &self, node_id: &PublicKey, status: &Status, - ) -> Result - where - S: Suite, - { + ) -> Result { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; let kind = Self::handoff_kind(status); - let dealer = self.get_dealer::(status.id, status.next_handoff)?; + let dealer = self.get_dealer(status.next_handoff)?; let share = dealer.make_share(x, kind); let share = (&share).into(); let verification_matrix = dealer.verification_matrix().to_bytes(); @@ -419,103 +581,21 @@ impl Churp { }) } - /// Returns the key share for the given key ID generated by the key - /// derivation center. - /// - /// Key share: - /// ```text - /// KS_i = s_i * H(key_id) - /// ``` - /// - /// WARNING: This method must be called over a secure channel as the key - /// share needs to be kept secret and generated only for authorized nodes. - pub fn sgx_policy_key_share( - &self, - ctx: &RpcContext, - req: &KeyShareRequest, - ) -> Result { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; - - self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.make_key_share::(&req.key_id.0, &status) - } - } - } - - fn make_key_share(&self, key_id: &[u8], status: &Status) -> Result - where - S: Suite, - { - let shareholder = self.get_shareholder::(status.id, status.handoff)?; + fn make_key_share(&self, key_id: &[u8], status: &Status) -> Result { + let shareholder = self.get_shareholder(status.id, status.handoff)?; let dst = self.domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, status.id); let point = shareholder.make_key_share::(key_id, &dst)?; Ok((&point).into()) } - /// Prepare CHURP for participation in the given handoff of the protocol. - /// - /// Initialization randomly selects a bivariate polynomial for the given - /// handoff, computes the corresponding verification matrix and its - /// checksum, and signs the latter. - /// - /// Bivariate polynomial: - /// B(x,y) = \sum_{i=0}^{t_n} \sum_{j=0}^{t_m} b_{i,j} x^i y^j - /// - /// Verification matrix: - /// M = [b_{i,j} * G] - /// - /// Checksum: - /// H = KMAC256(M, runtime ID, handoff) - /// - /// The bivariate polynomial is zero-hole in all handoffs expect in the - /// first one (dealing phase). - /// - /// This method must be called locally. 
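// Illustrative sketch: the checksum above, H = KMAC256(M, runtime ID, handoff),
// binds the committed verification matrix to a concrete scheme instance and
// handoff epoch. A minimal sketch of that binding, with `kmac256` injected as
// a stand-in primitive; the byte layout here is an assumption, not the crate's
// exact wire format.
fn checksum_verification_matrix_sketch(
    matrix_bytes: &[u8],
    runtime_id: &[u8; 32],
    churp_id: u8,
    epoch: u64,
    kmac256: impl Fn(&[u8], &[u8]) -> [u8; 32],
) -> [u8; 32] {
    // Domain-separate by runtime, scheme instance and epoch so a matrix
    // committed for one handoff cannot be replayed for another.
    let mut customization = Vec::with_capacity(32 + 1 + 8);
    customization.extend_from_slice(runtime_id);
    customization.push(churp_id);
    customization.extend_from_slice(&epoch.to_le_bytes());
    kmac256(matrix_bytes, &customization)
}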
- pub fn init(&self, req: &HandoffRequest) -> Result { - if self.runtime_id != req.runtime_id { - return Err(Error::RuntimeMismatch.into()); - } - - let status = self.churp_state.status(self.runtime_id, req.id)?; - if status.next_handoff != req.epoch { - return Err(Error::HandoffMismatch.into()); - } - if status.next_handoff == HANDOFFS_DISABLED { - return Err(Error::HandoffsDisabled.into()); - } - if status.applications.contains_key(&self.node_id) { - return Err(Error::ApplicationSubmitted.into()); - } - - let now = self.beacon_state.epoch()?; - if status.next_handoff != now + 1 { - return Err(Error::ApplicationsClosed.into()); - } - - let dealing_phase = status.committee.is_empty(); - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.do_init::(req.id, req.epoch, status.threshold, dealing_phase) - } - } - } - - fn do_init( + fn do_init( &self, churp_id: u8, epoch: EpochTime, threshold: u8, dealing_phase: bool, - ) -> Result - where - S: Suite, - { - let dealer = - self.get_or_create_dealer::(churp_id, epoch, threshold, dealing_phase)?; + ) -> Result { + let dealer = self.get_or_create_dealer(epoch, threshold, dealing_phase)?; // Fetch verification matrix and compute its checksum. let matrix = dealer.verification_matrix(); @@ -540,63 +620,26 @@ impl Churp { } /// Tries to fetch switch points for share reduction from the given nodes. - /// - /// Switch points should be obtained from (at least) t distinct nodes - /// belonging to the old committee, verified against verification matrix - /// whose checksum was published in the consensus layer, merged into - /// a reduced share using Lagrange interpolation and proactivized with - /// bivariate shares. - /// - /// Switch point: - /// ```text - /// P_i = B(node_i, me) - ///``` - /// Reduced share: - /// ```text - /// RS(x) = B(x, me) - /// ```` - /// Proactive reduced share: - /// ```text - /// QR(x) = RS(x) + \sum Q_i(x, me) - /// ```` - pub fn share_reduction(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.fetch_share_reduction_switch_points::(&req.node_ids, &status) - } - } - } - - /// Tries to fetch switch points for share reduction from the given nodes. - pub fn fetch_share_reduction_switch_points( + pub fn fetch_share_reduction_switch_points( &self, node_ids: &Vec, status: &Status, - ) -> Result - where - S: Suite + 'static, - { - let handoff = self.get_or_create_handoff::(status)?; + ) -> Result { + let handoff = self.get_or_create_handoff(status)?; let client = self.key_manager_client(status, false)?; - let f = |node_id| { - self.fetch_share_reduction_switch_point::(node_id, status, &handoff, &client) - }; + let f = + |node_id| self.fetch_share_reduction_switch_point(node_id, status, &handoff, &client); fetch(f, node_ids) } /// Tries to fetch switch point for share reduction from the given node. - pub fn fetch_share_reduction_switch_point( + pub fn fetch_share_reduction_switch_point( &self, node_id: PublicKey, status: &Status, handoff: &Handoff, client: &RemoteClient, - ) -> Result - where - S: Suite, - { + ) -> Result { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; @@ -606,7 +649,7 @@ impl Churp { // Fetch from the host node. 
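        // When the caller asks for its own point, skip the remote RPC: the
        // local shareholder evaluates the switch point directly, and the
        // verification matrix, if still needed, comes from its verifiable
        // share rather than from the network.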
if node_id == self.node_id { - let shareholder = self.get_shareholder::(status.id, status.handoff)?; + let shareholder = self.get_shareholder(status.id, status.handoff)?; let point = shareholder.switch_point(&x); if handoff.needs_verification_matrix()? { @@ -650,60 +693,28 @@ impl Churp { handoff.add_share_reduction_switch_point(x, point) } - /// Tries to fetch switch data points for full share distribution from - /// the given nodes. - /// - /// Switch points should be obtained from (at least) 2t distinct nodes - /// belonging to the new committee, verified against the sum of the - /// verification matrix and the verification matrices of proactive - /// bivariate shares, whose checksums were published in the consensus - /// layer, and merged into a full share using Lagrange interpolation. - /// - /// Switch point: - /// ```text - /// P_i = B(me, node_i) + \sum Q_i(me, node_i) - ///``` - /// Full share: - /// ```text - /// FS(x) = B(me, y) + \sum Q_i(me, y) = B'(me, y) - /// ```` - pub fn share_distribution(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => self - .fetch_share_distribution_switch_points::(&req.node_ids, &status), - } - } - /// Tries to fetch switch points for share distribution from the given nodes. - pub fn fetch_share_distribution_switch_points( + pub fn fetch_share_distribution_switch_points( &self, node_ids: &Vec, status: &Status, - ) -> Result - where - S: Suite + 'static, - { - let handoff = self.get_handoff::(status.id, status.next_handoff)?; + ) -> Result { + let handoff = self.get_handoff(status.next_handoff)?; let client = self.key_manager_client(status, true)?; let f = |node_id| { - self.fetch_share_distribution_switch_point::(node_id, status, &handoff, &client) + self.fetch_share_distribution_switch_point(node_id, status, &handoff, &client) }; fetch(f, node_ids) } /// Tries to fetch switch point for share reduction from the given node. - pub fn fetch_share_distribution_switch_point( + pub fn fetch_share_distribution_switch_point( &self, node_id: PublicKey, status: &Status, handoff: &Handoff, - client: &RemoteClient, - ) -> Result - where - S: Suite, - { + client: &RemoteClient, + ) -> Result { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; @@ -732,53 +743,25 @@ impl Churp { } /// Tries to fetch proactive bivariate shares from the given nodes. - /// - /// Bivariate shares should be fetched from all candidates for the new - /// committee, including our own, verified against verification matrices - /// whose checksums were published in the consensus layer, and summed - /// into a bivariate polynomial. - /// - /// Bivariate polynomial share: - /// ```text - /// S_i(y) = Q_i(me, y) (dealing phase or unchanged committee) - /// S_i(x) = Q_i(x, me) (committee changes) - /// ``` - pub fn proactivization(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.fetch_bivariate_shares::(&req.node_ids, &status) - } - } - } - - /// Tries to fetch proactive bivariate shares from the given nodes. 
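// Illustrative sketch: proactivization sums the fetched bivariate shares into
// the existing share, coefficient by coefficient (their verification matrices
// are combined the same way). Toy modular arithmetic and a flat coefficient
// vector stand in for the real suite types here.
fn proactivize_share(share: &mut [u64], bivariate_shares: &[Vec<u64>], p: u64) {
    for q in bivariate_shares {
        // share(x) += Q_i(x), term by term (mod p).
        for (c, &qc) in share.iter_mut().zip(q.iter()) {
            *c = (*c + qc) % p;
        }
    }
}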
- pub fn fetch_bivariate_shares( + pub fn fetch_bivariate_shares( &self, node_ids: &Vec, status: &Status, - ) -> Result - where - S: Suite + 'static, - { - let handoff = self.get_or_create_handoff::(status)?; + ) -> Result { + let handoff = self.get_or_create_handoff(status)?; let client = self.key_manager_client(status, true)?; - let f = |node_id| self.fetch_bivariate_share::(node_id, status, &handoff, &client); + let f = |node_id| self.fetch_bivariate_share(node_id, status, &handoff, &client); fetch(f, node_ids) } /// Tries to fetch proactive bivariate share from the given node. - pub fn fetch_bivariate_share( + pub fn fetch_bivariate_share( &self, node_id: PublicKey, status: &Status, handoff: &Handoff, client: &RemoteClient, - ) -> Result - where - S: Suite, - { + ) -> Result { let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); let x = encode_shareholder::(&node_id.0, &dst)?; @@ -789,7 +772,7 @@ impl Churp { // Fetch from the host node. if node_id == self.node_id { let kind = Self::handoff_kind(status); - let dealer = self.get_dealer::(status.id, status.next_handoff)?; + let dealer = self.get_dealer(status.next_handoff)?; let share = dealer.make_share(x, kind); let vm = dealer.verification_matrix().clone(); let verifiable_share = VerifiableSecretShare::new(share, vm); @@ -823,32 +806,15 @@ impl Churp { handoff.add_bivariate_share(&x, verifiable_share) } - /// Returns a signed confirmation request containing the checksum - /// of the merged verification matrix. - pub fn confirmation(&self, req: &HandoffRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - if !status.applications.contains_key(&self.node_id) { - return Err(Error::ApplicationNotSubmitted.into()); - } - - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.prepare_confirmation::(&status), - } - } - - fn prepare_confirmation(&self, status: &Status) -> Result - where - S: Suite + 'static, - { - let handoff = self.get_handoff::(status.id, status.next_handoff)?; + fn prepare_confirmation(&self, status: &Status) -> Result { + let handoff = self.get_handoff(status.next_handoff)?; let shareholder = handoff.get_full_shareholder()?; let share = shareholder.verifiable_share(); // Before overwriting the next secret share, make sure it was copied // and used to construct the last shareholder. let _ = self - .get_shareholder::(status.id, status.handoff) + .get_shareholder(status.id, status.handoff) .map(Some) .or_else(|err| ignore_error(err, Error::ShareholderNotFound))?; // Ignore if we don't have the correct share. @@ -877,24 +843,9 @@ impl Churp { }) } - /// Finalizes the specified scheme by cleaning up obsolete dealers, - /// handoffs, and shareholders. If the handoff was just completed, - /// the shareholder is made available, and its share is persisted - /// to the local storage. - pub fn finalize(&self, req: &HandoffRequest) -> Result<()> { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.do_finalize::(&status), - } - } - - fn do_finalize(&self, status: &Status) -> Result<()> - where - S: Suite + 'static, - { + fn do_finalize(&self, status: &Status) -> Result<()> { // Move the shareholder if the handoff was completed. 
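        // If it was, the full shareholder produced by the handoff becomes the
        // active one: its verifiable share is persisted first, and only then
        // is it published in memory so queries for the new epoch can be served.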
- let handoff = self.get_handoff::(status.id, status.handoff); + let handoff = self.get_handoff(status.handoff); let handoff = match handoff { Ok(handoff) => Some(handoff), Err(err) => match err.downcast_ref::() { @@ -907,49 +858,32 @@ impl Churp { let share = shareholder.verifiable_share(); self.storage .store_secret_share(share, status.id, status.handoff)?; - self.add_shareholder(shareholder, status.id, status.handoff); + self.add_shareholder(shareholder, status.handoff); } // Cleanup. let max_epoch = status.handoff.saturating_sub(1); - self.remove_shareholder(status.id, max_epoch); + self.remove_shareholders(max_epoch); let max_epoch = status.next_handoff.saturating_sub(1); - self.remove_dealer(status.id, max_epoch); - self.remove_handoff(status.id, max_epoch); + self.remove_dealer(max_epoch); + self.remove_handoff(max_epoch); Ok(()) } /// Returns the shareholder for the specified scheme and handoff epoch. - fn get_shareholder( + fn get_shareholder( &self, churp_id: u8, epoch: EpochTime, - ) -> Result>> - where - S: Suite, - { + ) -> Result>> { // Check the memory first. Make sure to lock the new shareholders // so that we don't create two shareholders for the same handoff. let mut shareholders = self.shareholders.lock().unwrap(); - if let Some(data) = shareholders.get(&churp_id) { - match epoch.cmp(&data.epoch) { - cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()), - cmp::Ordering::Equal => { - // Downcasting should never fail because the consensus - // ensures that the suite ID cannot change. - let shareholder = data - .object - .clone() - .downcast::>() - .or(Err(Error::ShareholderMismatch))?; - - return Ok(shareholder); - } - cmp::Ordering::Greater => (), - } + if let Some(shareholder) = shareholders.get(&epoch) { + return Ok(shareholder.clone()); } // Fetch shareholder's secret share from the local storage and use it @@ -989,102 +923,54 @@ impl Churp { // Create a new shareholder. let shareholder = Arc::new(Shareholder::from(share)); - let data = HandoffData { - epoch, - object: shareholder.clone(), - }; - shareholders.insert(churp_id, data); + shareholders.insert(epoch, shareholder.clone()); Ok(shareholder) } /// Adds a shareholder for the specified scheme and handoff epoch. - fn add_shareholder(&self, shareholder: Arc>, churp_id: u8, epoch: EpochTime) - where - G: Group + GroupEncoding, - { + fn add_shareholder(&self, shareholder: Arc>, epoch: EpochTime) { let mut shareholders = self.shareholders.lock().unwrap(); - - if let Some(data) = shareholders.get(&churp_id) { - if epoch <= data.epoch { - return; - } - } - - let data = HandoffData { - epoch, - object: shareholder, - }; - shareholders.insert(churp_id, data); + shareholders.insert(epoch, shareholder); } - /// Removes shareholder for the specified scheme if the shareholder belongs - /// to a handoff that happened at or before the given epoch. - fn remove_shareholder(&self, churp_id: u8, max_epoch: EpochTime) { + /// Removes shareholders that belong to a handoff that happened at or before + /// the given epoch. + fn remove_shareholders(&self, max_epoch: EpochTime) { let mut shareholders = self.shareholders.lock().unwrap(); - let data = match shareholders.get(&churp_id) { - Some(data) => data, - None => return, - }; - - if data.epoch > max_epoch { - return; - } - - shareholders.remove(&churp_id); + shareholders.retain(|&epoch, _| epoch > max_epoch); } - /// Returns the dealer for the specified scheme and handoff epoch. 
- fn get_dealer(&self, churp_id: u8, epoch: EpochTime) -> Result>> - where - G: Group + GroupEncoding, - { - self._get_or_create_dealer(churp_id, epoch, None, None) + /// Returns the dealer for the specified handoff epoch. + fn get_dealer(&self, epoch: EpochTime) -> Result>> { + self._get_or_create_dealer(epoch, None, None) } - /// Returns the dealer for the specified scheme and handoff epoch. - /// If the dealer doesn't exist, a new one is created. - fn get_or_create_dealer( + /// Returns the dealer for the specified handoff epoch. If the dealer + /// doesn't exist, a new one is created. + fn get_or_create_dealer( &self, - churp_id: u8, epoch: EpochTime, threshold: u8, dealing_phase: bool, - ) -> Result>> - where - G: Group + GroupEncoding, - { - self._get_or_create_dealer(churp_id, epoch, Some(threshold), Some(dealing_phase)) + ) -> Result>> { + self._get_or_create_dealer(epoch, Some(threshold), Some(dealing_phase)) } - fn _get_or_create_dealer( + fn _get_or_create_dealer( &self, - churp_id: u8, epoch: EpochTime, threshold: Option, dealing_phase: Option, - ) -> Result>> - where - G: Group + GroupEncoding, - { - // Check the memory first. Make sure to lock the dealers so that we + ) -> Result>> { + // Check the memory first. Make sure to lock the dealer so that we // don't create two dealers for the same handoff. - let mut dealers = self.dealers.lock().unwrap(); + let mut dealer_guard = self.dealer.lock().unwrap(); - if let Some(data) = dealers.get(&churp_id) { - match epoch.cmp(&data.epoch) { + if let Some(dealer_info) = dealer_guard.as_ref() { + match epoch.cmp(&dealer_info.epoch) { cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()), - cmp::Ordering::Equal => { - // Downcasting should never fail because the consensus - // ensures that the suite ID cannot change. - let dealer = data - .object - .clone() - .downcast::>() - .or(Err(Error::DealerMismatch))?; - - return Ok(dealer); - } + cmp::Ordering::Equal => return Ok(dealer_info.dealer.clone()), cmp::Ordering::Greater => (), } } @@ -1094,7 +980,7 @@ impl Churp { // host has cleared the storage. let polynomial = self .storage - .load_bivariate_polynomial(churp_id, epoch) + .load_bivariate_polynomial(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidBivariatePolynomial))?; // Ignore previous dealers. let dealer = match polynomial { @@ -1122,7 +1008,7 @@ impl Churp { // Encrypt and store the polynomial in case of a restart. let polynomial = dealer.bivariate_polynomial(); self.storage - .store_bivariate_polynomial(polynomial, churp_id, epoch)?; + .store_bivariate_polynomial(polynomial, self.churp_id, epoch)?; dealer } @@ -1130,75 +1016,49 @@ impl Churp { // Create a new dealer. let dealer = Arc::new(dealer); - let data = HandoffData { + *dealer_guard = Some(DealerInfo { epoch, - object: dealer.clone(), - }; - dealers.insert(churp_id, data); + dealer: dealer.clone(), + }); Ok(dealer) } - /// Removes the dealer for the specified scheme if the dealer belongs - /// to a handoff that happened at or before the given epoch. - fn remove_dealer(&self, churp_id: u8, max_epoch: EpochTime) { - let mut dealers = self.dealers.lock().unwrap(); - let data = match dealers.get(&churp_id) { - Some(data) => data, - None => return, - }; - - if data.epoch > max_epoch { - return; + /// Removes the dealer if it belongs to a handoff that occurred + /// at or before the given epoch. 
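// Illustrative sketch: the dealer (and, below, the handoff) is cached behind
// an epoch-gated slot. A cached value is reused only for the exact epoch it
// was created for; older epochs are rejected and newer ones replace it. The
// generic names here are not from the patch.
use std::{cmp::Ordering, sync::Arc};

struct EpochGated<T> {
    epoch: u64,
    value: Arc<T>,
}

fn get_or_create<T>(
    slot: &mut Option<EpochGated<T>>,
    epoch: u64,
    create: impl FnOnce() -> T,
) -> Result<Arc<T>, &'static str> {
    if let Some(cached) = slot.as_ref() {
        match epoch.cmp(&cached.epoch) {
            Ordering::Less => return Err("invalid handoff"), // stale request
            Ordering::Equal => return Ok(cached.value.clone()),
            Ordering::Greater => (), // outdated cache; fall through and replace
        }
    }
    let value = Arc::new(create());
    *slot = Some(EpochGated {
        epoch,
        value: value.clone(),
    });
    Ok(value)
}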
+ fn remove_dealer(&self, max_epoch: EpochTime) { + let mut dealer_guard = self.dealer.lock().unwrap(); + if let Some(dealer_info) = dealer_guard.as_ref() { + if dealer_info.epoch <= max_epoch { + *dealer_guard = None; + } } - - dealers.remove(&churp_id); } - /// Returns the handoff for the specified scheme and handoff epoch. - fn get_handoff(&self, churp_id: u8, epoch: EpochTime) -> Result>> - where - S: Suite + 'static, - { - self._get_or_create_handoff::(churp_id, epoch, None) + /// Returns the handoff for the specified handoff epoch. + fn get_handoff(&self, epoch: EpochTime) -> Result>> { + self._get_or_create_handoff(epoch, None) } - /// Returns the handoff for the specified scheme and the next handoff epoch. - /// If the handoff doesn't exist, a new one is created. - fn get_or_create_handoff(&self, status: &Status) -> Result>> - where - S: Suite + 'static, - { - self._get_or_create_handoff::(status.id, status.next_handoff, Some(status)) + /// Returns the handoff for the next handoff epoch. If the handoff doesn't + /// exist, a new one is created. + fn get_or_create_handoff(&self, status: &Status) -> Result>> { + self._get_or_create_handoff(status.next_handoff, Some(status)) } - fn _get_or_create_handoff( + fn _get_or_create_handoff( &self, - churp_id: u8, epoch: EpochTime, status: Option<&Status>, - ) -> Result>> - where - S: Suite + 'static, - { - // Check the memory first. Make sure to lock the handoffs so that we + ) -> Result>> { + // Check the memory first. Make sure to lock the handoff so that we // don't create two handoffs for the same epoch. - let mut handoffs = self.handoffs.lock().unwrap(); + let mut handoff_guard = self.handoff.lock().unwrap(); - if let Some(data) = handoffs.get(&churp_id) { - match epoch.cmp(&data.epoch) { + if let Some(handoff_info) = handoff_guard.as_ref() { + match epoch.cmp(&handoff_info.epoch) { cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()), - cmp::Ordering::Equal => { - // Downcasting should never fail because the consensus - // ensures that the suite ID cannot change. - let handoff = data - .object - .clone() - .downcast::>() - .or(Err(Error::HandoffDowncastFailed))?; - - return Ok(handoff); - } + cmp::Ordering::Equal => return Ok(handoff_info.handoff.clone()), cmp::Ordering::Greater => (), } } @@ -1219,34 +1079,27 @@ impl Churp { let handoff = Handoff::new(threshold, me, shareholders, kind)?; if kind == HandoffKind::CommitteeUnchanged { - let shareholder = self.get_shareholder::(churp_id, status.handoff)?; + let shareholder = self.get_shareholder(self.churp_id, status.handoff)?; handoff.set_shareholder(shareholder)?; } let handoff = Arc::new(handoff); - let data = HandoffData { + *handoff_guard = Some(HandoffInfo { epoch, - object: handoff.clone(), - }; - handoffs.insert(churp_id, data); + handoff: handoff.clone(), + }); Ok(handoff) } - /// Removes the dealer for the specified scheme if the dealer belongs - /// to a handoff that happened at or before the given epoch. - fn remove_handoff(&self, churp_id: u8, max_epoch: EpochTime) { - let mut handoffs = self.handoffs.lock().unwrap(); - let data = match handoffs.get(&churp_id) { - Some(data) => data, - None => return, - }; - - if data.epoch > max_epoch { - return; + // Removes the handoff if it happened at or before the given epoch. 
+ fn remove_handoff(&self, max_epoch: EpochTime) { + let mut handoff_guard = self.handoff.lock().unwrap(); + if let Some(handoff_info) = handoff_guard.as_ref() { + if handoff_info.epoch <= max_epoch { + *handoff_guard = None; + } } - - handoffs.remove(&churp_id); } /// Verifies parameters of the last successfully completed handoff against @@ -1458,6 +1311,183 @@ impl Churp { } } +impl Handler for Instance { + fn verification_matrix(&self, req: &QueryRequest) -> Result> { + let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + let shareholder = match status.suite_id { + SuiteId::NistP384Sha3_384 => self.get_shareholder(req.id, req.epoch)?, + }; + let vm = shareholder + .verifiable_share() + .verification_matrix() + .to_bytes(); + + Ok(vm) + } + + fn share_reduction_switch_point( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result> { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + let kind = Self::handoff_kind(&status); + if !matches!(kind, HandoffKind::CommitteeChanged) { + return Err(Error::InvalidHandoff.into()); + } + + let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; + if !status.applications.contains_key(node_id) { + return Err(Error::NotInCommittee.into()); + } + + self.verify_node_id(ctx, node_id)?; + self.verify_km_enclave(ctx, &status.policy)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.derive_share_reduction_switch_point(node_id, &status), + } + } + + fn share_distribution_switch_point( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result> { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + let kind = Self::handoff_kind(&status); + if !matches!(kind, HandoffKind::CommitteeChanged) { + return Err(Error::InvalidHandoff.into()); + } + + let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; + if !status.applications.contains_key(node_id) { + return Err(Error::NotInCommittee.into()); + } + + self.verify_node_id(ctx, node_id)?; + self.verify_km_enclave(ctx, &status.policy)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.derive_share_distribution_point(node_id, &status), + } + } + + fn bivariate_share( + &self, + ctx: &RpcContext, + req: &QueryRequest, + ) -> Result { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; + if !status.applications.contains_key(node_id) { + return Err(Error::NotInCommittee.into()); + } + + self.verify_node_id(ctx, node_id)?; + self.verify_km_enclave(ctx, &status.policy)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.derive_bivariate_share(node_id, &status), + } + } + + fn sgx_policy_key_share( + &self, + ctx: &RpcContext, + req: &KeyShareRequest, + ) -> Result { + let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + + self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.make_key_share(&req.key_id.0, &status), + } + } + + fn init(&self, req: &HandoffRequest) -> Result { + if self.runtime_id != req.runtime_id { + return Err(Error::RuntimeMismatch.into()); + } + + let status = self.churp_state.status(self.runtime_id, req.id)?; + if status.next_handoff != req.epoch { + return Err(Error::HandoffMismatch.into()); + } + if status.next_handoff == HANDOFFS_DISABLED { + return Err(Error::HandoffsDisabled.into()); + } + if status.applications.contains_key(&self.node_id) { + 
return Err(Error::ApplicationSubmitted.into()); + } + + let now = self.beacon_state.epoch()?; + if status.next_handoff != now + 1 { + return Err(Error::ApplicationsClosed.into()); + } + + let dealing_phase = status.committee.is_empty(); + + match status.suite_id { + SuiteId::NistP384Sha3_384 => { + self.do_init(req.id, req.epoch, status.threshold, dealing_phase) + } + } + } + + fn share_reduction(&self, req: &FetchRequest) -> Result { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => { + self.fetch_share_reduction_switch_points(&req.node_ids, &status) + } + } + } + + fn share_distribution(&self, req: &FetchRequest) -> Result { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => { + self.fetch_share_distribution_switch_points(&req.node_ids, &status) + } + } + } + + fn proactivization(&self, req: &FetchRequest) -> Result { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.fetch_bivariate_shares(&req.node_ids, &status), + } + } + + fn confirmation(&self, req: &HandoffRequest) -> Result { + let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + + if !status.applications.contains_key(&self.node_id) { + return Err(Error::ApplicationNotSubmitted.into()); + } + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.prepare_confirmation(&status), + } + } + + fn finalize(&self, req: &HandoffRequest) -> Result<()> { + let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + + match status.suite_id { + SuiteId::NistP384Sha3_384 => self.do_finalize(&status), + } + } +} + /// Replaces the given error with `Ok(None)`. fn ignore_error(err: anyhow::Error, ignore: Error) -> Result> { match err.downcast_ref::() { diff --git a/keymanager/src/churp/methods.rs b/keymanager/src/churp/methods.rs index b8212434e2d..88e3951c75e 100644 --- a/keymanager/src/churp/methods.rs +++ b/keymanager/src/churp/methods.rs @@ -6,7 +6,7 @@ use oasis_core_runtime::enclave_rpc::{ types::Kind as RpcKind, }; -use crate::churp::Churp; +use crate::churp::{Churp, Handler}; /// Name of the `init` method. 
pub const METHOD_INIT: &str = "churp/init"; From ebdf5de39650cdea9f8247a265dc5629c0f9556c Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Thu, 18 Jul 2024 19:08:15 +0200 Subject: [PATCH 3/8] keymanager/src/churp: Remove helper functions --- keymanager/src/churp/handler.rs | 355 ++++++++++++-------------------- 1 file changed, 127 insertions(+), 228 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index 1e49a2847d6..84de0dc69b1 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -533,105 +533,6 @@ impl Instance { } } - fn derive_share_reduction_switch_point( - &self, - node_id: &PublicKey, - status: &Status, - ) -> Result> { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; - let shareholder = self.get_shareholder(status.id, status.handoff)?; - let point = shareholder.switch_point(&x); - let point = scalar_to_bytes(&point); - - Ok(point) - } - - fn derive_share_distribution_point( - &self, - node_id: &PublicKey, - status: &Status, - ) -> Result> { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; - let handoff = self.get_handoff(status.next_handoff)?; - let shareholder = handoff.get_reduced_shareholder()?; - let point = shareholder.switch_point(&x); - let point = scalar_to_bytes(&point); - - Ok(point) - } - - fn derive_bivariate_share( - &self, - node_id: &PublicKey, - status: &Status, - ) -> Result { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; - let kind = Self::handoff_kind(status); - let dealer = self.get_dealer(status.next_handoff)?; - let share = dealer.make_share(x, kind); - let share = (&share).into(); - let verification_matrix = dealer.verification_matrix().to_bytes(); - - Ok(EncodedVerifiableSecretShare { - share, - verification_matrix, - }) - } - - fn make_key_share(&self, key_id: &[u8], status: &Status) -> Result { - let shareholder = self.get_shareholder(status.id, status.handoff)?; - let dst = self.domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, status.id); - let point = shareholder.make_key_share::(key_id, &dst)?; - Ok((&point).into()) - } - - fn do_init( - &self, - churp_id: u8, - epoch: EpochTime, - threshold: u8, - dealing_phase: bool, - ) -> Result { - let dealer = self.get_or_create_dealer(epoch, threshold, dealing_phase)?; - - // Fetch verification matrix and compute its checksum. - let matrix = dealer.verification_matrix(); - let checksum = Self::checksum_verification_matrix(matrix, self.runtime_id, churp_id, epoch); - - // Prepare response and sign it with RAK. - let application = ApplicationRequest { - id: churp_id, - runtime_id: self.runtime_id, - epoch, - checksum, - }; - let body = cbor::to_vec(application.clone()); - let signature = self - .signer - .sign(APPLICATION_REQUEST_SIGNATURE_CONTEXT, &body)?; - - Ok(SignedApplicationRequest { - application, - signature, - }) - } - - /// Tries to fetch switch points for share reduction from the given nodes. 
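// Illustrative sketch: the helpers removed below all follow one pattern that
// the Handler methods now spell out inline: build a per-node closure and pass
// it to the crate's `fetch` combinator together with the node list. A
// simplified sequential stand-in (names and signatures assumed):
fn fetch_each<Id: Copy>(
    f: impl Fn(Id) -> anyhow::Result<bool>,
    node_ids: &[Id],
) -> (Vec<Id>, Vec<Id>) {
    let (mut succeeded, mut failed) = (Vec::new(), Vec::new());
    for &node_id in node_ids {
        // A node counts as failed when its point or share cannot be fetched
        // or does not verify; the caller can retry with the failed set.
        match f(node_id) {
            Ok(_) => succeeded.push(node_id),
            Err(_) => failed.push(node_id),
        }
    }
    (succeeded, failed)
}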
- pub fn fetch_share_reduction_switch_points( - &self, - node_ids: &Vec, - status: &Status, - ) -> Result { - let handoff = self.get_or_create_handoff(status)?; - let client = self.key_manager_client(status, false)?; - let f = - |node_id| self.fetch_share_reduction_switch_point(node_id, status, &handoff, &client); - fetch(f, node_ids) - } - /// Tries to fetch switch point for share reduction from the given node. pub fn fetch_share_reduction_switch_point( &self, @@ -693,20 +594,6 @@ impl Instance { handoff.add_share_reduction_switch_point(x, point) } - /// Tries to fetch switch points for share distribution from the given nodes. - pub fn fetch_share_distribution_switch_points( - &self, - node_ids: &Vec, - status: &Status, - ) -> Result { - let handoff = self.get_handoff(status.next_handoff)?; - let client = self.key_manager_client(status, true)?; - let f = |node_id| { - self.fetch_share_distribution_switch_point(node_id, status, &handoff, &client) - }; - fetch(f, node_ids) - } - /// Tries to fetch switch point for share reduction from the given node. pub fn fetch_share_distribution_switch_point( &self, @@ -742,18 +629,6 @@ impl Instance { handoff.add_full_share_distribution_switch_point(x, point) } - /// Tries to fetch proactive bivariate shares from the given nodes. - pub fn fetch_bivariate_shares( - &self, - node_ids: &Vec, - status: &Status, - ) -> Result { - let handoff = self.get_or_create_handoff(status)?; - let client = self.key_manager_client(status, true)?; - let f = |node_id| self.fetch_bivariate_share(node_id, status, &handoff, &client); - fetch(f, node_ids) - } - /// Tries to fetch proactive bivariate share from the given node. pub fn fetch_bivariate_share( &self, @@ -806,72 +681,6 @@ impl Instance { handoff.add_bivariate_share(&x, verifiable_share) } - fn prepare_confirmation(&self, status: &Status) -> Result { - let handoff = self.get_handoff(status.next_handoff)?; - let shareholder = handoff.get_full_shareholder()?; - let share = shareholder.verifiable_share(); - - // Before overwriting the next secret share, make sure it was copied - // and used to construct the last shareholder. - let _ = self - .get_shareholder(status.id, status.handoff) - .map(Some) - .or_else(|err| ignore_error(err, Error::ShareholderNotFound))?; // Ignore if we don't have the correct share. - - // Always persist the secret share before sending confirmation. - self.storage - .store_next_secret_share(share, status.id, status.next_handoff)?; - - // Prepare response and sign it with RAK. - let vm = share.verification_matrix(); - let checksum = - Self::checksum_verification_matrix(vm, self.runtime_id, status.id, status.next_handoff); - let confirmation = ConfirmationRequest { - id: status.id, - runtime_id: self.runtime_id, - epoch: status.next_handoff, - checksum, - }; - let body = cbor::to_vec(confirmation.clone()); - let signature = self - .signer - .sign(CONFIRMATION_REQUEST_SIGNATURE_CONTEXT, &body)?; - - Ok(SignedConfirmationRequest { - confirmation, - signature, - }) - } - - fn do_finalize(&self, status: &Status) -> Result<()> { - // Move the shareholder if the handoff was completed. 
- let handoff = self.get_handoff(status.handoff); - let handoff = match handoff { - Ok(handoff) => Some(handoff), - Err(err) => match err.downcast_ref::() { - Some(err) if err == &Error::HandoffNotFound => None, - _ => return Err(err), - }, - }; - if let Some(handoff) = handoff { - let shareholder = handoff.get_full_shareholder()?; - let share = shareholder.verifiable_share(); - self.storage - .store_secret_share(share, status.id, status.handoff)?; - self.add_shareholder(shareholder, status.handoff); - } - - // Cleanup. - let max_epoch = status.handoff.saturating_sub(1); - self.remove_shareholders(max_epoch); - - let max_epoch = status.next_handoff.saturating_sub(1); - self.remove_dealer(max_epoch); - self.remove_handoff(max_epoch); - - Ok(()) - } - /// Returns the shareholder for the specified scheme and handoff epoch. fn get_shareholder( &self, @@ -1345,9 +1154,13 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.derive_share_reduction_switch_point(node_id, &status), - } + let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); + let x = encode_shareholder::(&node_id.0, &dst)?; + let shareholder = self.get_shareholder(status.id, status.handoff)?; + let point = shareholder.switch_point(&x); + let point = scalar_to_bytes(&point); + + Ok(point) } fn share_distribution_switch_point( @@ -1370,9 +1183,14 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.derive_share_distribution_point(node_id, &status), - } + let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); + let x = encode_shareholder::(&node_id.0, &dst)?; + let handoff = self.get_handoff(status.next_handoff)?; + let shareholder = handoff.get_reduced_shareholder()?; + let point = shareholder.switch_point(&x); + let point = scalar_to_bytes(&point); + + Ok(point) } fn bivariate_share( @@ -1390,9 +1208,18 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.derive_bivariate_share(node_id, &status), - } + let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); + let x = encode_shareholder::(&node_id.0, &dst)?; + let kind = Self::handoff_kind(&status); + let dealer = self.get_dealer(status.next_handoff)?; + let share = dealer.make_share(x, kind); + let share = (&share).into(); + let verification_matrix = dealer.verification_matrix().to_bytes(); + + Ok(EncodedVerifiableSecretShare { + share, + verification_matrix, + }) } fn sgx_policy_key_share( @@ -1404,9 +1231,10 @@ impl Handler for Instance { self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.make_key_share(&req.key_id.0, &status), - } + let shareholder = self.get_shareholder(status.id, status.handoff)?; + let dst = self.domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, status.id); + let point = shareholder.make_key_share::(&req.key_id.0, &dst)?; + Ok((&point).into()) } fn init(&self, req: &HandoffRequest) -> Result { @@ -1431,40 +1259,57 @@ impl Handler for Instance { } let dealing_phase = status.committee.is_empty(); + let dealer = self.get_or_create_dealer(req.epoch, status.threshold, dealing_phase)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - 
self.do_init(req.id, req.epoch, status.threshold, dealing_phase) - } - } + // Fetch verification matrix and compute its checksum. + let matrix = dealer.verification_matrix(); + let checksum = + Self::checksum_verification_matrix(matrix, self.runtime_id, req.id, req.epoch); + + // Prepare response and sign it with RAK. + let application = ApplicationRequest { + id: req.id, + runtime_id: self.runtime_id, + epoch: req.epoch, + checksum, + }; + let body = cbor::to_vec(application.clone()); + let signature = self + .signer + .sign(APPLICATION_REQUEST_SIGNATURE_CONTEXT, &body)?; + + Ok(SignedApplicationRequest { + application, + signature, + }) } fn share_reduction(&self, req: &FetchRequest) -> Result { let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.fetch_share_reduction_switch_points(&req.node_ids, &status) - } - } + let handoff = self.get_or_create_handoff(&status)?; + let client = self.key_manager_client(&status, false)?; + let f = + |node_id| self.fetch_share_reduction_switch_point(node_id, &status, &handoff, &client); + fetch(f, &req.node_ids) } fn share_distribution(&self, req: &FetchRequest) -> Result { let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => { - self.fetch_share_distribution_switch_points(&req.node_ids, &status) - } - } + let handoff = self.get_handoff(status.next_handoff)?; + let client = self.key_manager_client(&status, true)?; + let f = |node_id| { + self.fetch_share_distribution_switch_point(node_id, &status, &handoff, &client) + }; + fetch(f, &req.node_ids) } fn proactivization(&self, req: &FetchRequest) -> Result { let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; - - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.fetch_bivariate_shares(&req.node_ids, &status), - } + let handoff = self.get_or_create_handoff(&status)?; + let client = self.key_manager_client(&status, true)?; + let f = |node_id| self.fetch_bivariate_share(node_id, &status, &handoff, &client); + fetch(f, &req.node_ids) } fn confirmation(&self, req: &HandoffRequest) -> Result { @@ -1474,17 +1319,71 @@ impl Handler for Instance { return Err(Error::ApplicationNotSubmitted.into()); } - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.prepare_confirmation(&status), - } + let handoff = self.get_handoff(status.next_handoff)?; + let shareholder = handoff.get_full_shareholder()?; + let share = shareholder.verifiable_share(); + + // Before overwriting the next secret share, make sure it was copied + // and used to construct the last shareholder. + let _ = self + .get_shareholder(status.id, status.handoff) + .map(Some) + .or_else(|err| ignore_error(err, Error::ShareholderNotFound))?; // Ignore if we don't have the correct share. + + // Always persist the secret share before sending confirmation. + self.storage + .store_next_secret_share(share, status.id, status.next_handoff)?; + + // Prepare response and sign it with RAK. 
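+ // The checksum binds the verification matrix to the runtime ID,
+ // churp ID and next handoff epoch (see checksum_verification_matrix),
+ // and signing with the runtime attestation key (RAK) lets consensus
+ // attribute the confirmation to this enclave.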
+ let vm = share.verification_matrix(); + let checksum = + Self::checksum_verification_matrix(vm, self.runtime_id, status.id, status.next_handoff); + let confirmation = ConfirmationRequest { + id: status.id, + runtime_id: self.runtime_id, + epoch: status.next_handoff, + checksum, + }; + let body = cbor::to_vec(confirmation.clone()); + let signature = self + .signer + .sign(CONFIRMATION_REQUEST_SIGNATURE_CONTEXT, &body)?; + + Ok(SignedConfirmationRequest { + confirmation, + signature, + }) } fn finalize(&self, req: &HandoffRequest) -> Result<()> { let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; - match status.suite_id { - SuiteId::NistP384Sha3_384 => self.do_finalize(&status), + // Move the shareholder if the handoff was completed. + let handoff = self.get_handoff(status.handoff); + let handoff = match handoff { + Ok(handoff) => Some(handoff), + Err(err) => match err.downcast_ref::() { + Some(err) if err == &Error::HandoffNotFound => None, + _ => return Err(err), + }, + }; + if let Some(handoff) = handoff { + let shareholder = handoff.get_full_shareholder()?; + let share = shareholder.verifiable_share(); + self.storage + .store_secret_share(share, status.id, status.handoff)?; + self.add_shareholder(shareholder, status.handoff); } + + // Cleanup. + let max_epoch = status.handoff.saturating_sub(1); + self.remove_shareholders(max_epoch); + + let max_epoch = status.next_handoff.saturating_sub(1); + self.remove_dealer(max_epoch); + self.remove_handoff(max_epoch); + + Ok(()) } } From fb5bcf83b4d6ac5f2b0376543472eacb04fb58c4 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Fri, 19 Jul 2024 09:57:16 +0200 Subject: [PATCH 4/8] keymanager/src/churp: Compute domain separation tags only once --- keymanager/src/churp/handler.rs | 46 ++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 21 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index 84de0dc69b1..c9f368214d3 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -489,6 +489,12 @@ struct Instance { /// Cached verified policies. policies: Arc, + + /// Domain separation tag for encoding shareholder identifiers. + shareholder_dst: Vec, + /// Domain separation tag for encoding key identifiers for key share + /// derivation approved by an SGX policy. + sgx_policy_key_id_dst: Vec, } impl Instance { @@ -514,6 +520,11 @@ impl Instance { let dealer = Mutex::new(None); let handoff = Mutex::new(None); + let shareholder_dst = + Self::domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, &runtime_id, churp_id); + let sgx_policy_key_id_dst = + Self::domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, &runtime_id, churp_id); + Self { churp_id, identity, @@ -530,6 +541,8 @@ impl Instance { dealer, handoff, policies, + shareholder_dst, + sgx_policy_key_id_dst, } } @@ -541,8 +554,7 @@ impl Instance { handoff: &Handoff, client: &RemoteClient, ) -> Result { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; if !handoff.needs_share_reduction_switch_point(&x)? 
{ return Err(Error::InvalidShareholder.into()); @@ -602,8 +614,7 @@ impl Instance { handoff: &Handoff, client: &RemoteClient, ) -> Result { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; if !handoff.needs_full_share_distribution_switch_point(&x)? { return Err(Error::InvalidShareholder.into()); @@ -637,8 +648,7 @@ impl Instance { handoff: &Handoff, client: &RemoteClient, ) -> Result { - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; if !handoff.needs_bivariate_share(&x)? { return Err(Error::InvalidShareholder.into()); @@ -724,8 +734,7 @@ impl Instance { let share = share.ok_or(Error::ShareholderNotFound)?; // Verify that the host hasn't changed. - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, churp_id); - let x = encode_shareholder::(&self.node_id.0, &dst)?; + let x = encode_shareholder::(&self.node_id.0, &self.shareholder_dst)?; if share.secret_share().coordinate_x() != &x { return Err(Error::InvalidHost.into()); } @@ -877,11 +886,10 @@ impl Instance { // Create a new handoff. let threshold = status.threshold; - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let me = encode_shareholder::(&self.node_id.0, &dst)?; + let me = encode_shareholder::(&self.node_id.0, &self.shareholder_dst)?; let mut shareholders = Vec::with_capacity(status.applications.len()); for id in status.applications.keys() { - let x = encode_shareholder::(&id.0, &dst)?; + let x = encode_shareholder::(&id.0, &self.shareholder_dst)?; shareholders.push(x); } let kind = Self::handoff_kind(status); @@ -1110,10 +1118,10 @@ impl Instance { /// Extends the given domain separation tag with key manager runtime ID /// and churp ID. 
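///
/// The resulting tag is laid out as (see the body below):
///
/// ```text
/// context || RUNTIME_CONTEXT_SEPARATOR || runtime_id
///         || CHURP_CONTEXT_SEPARATOR || [churp_id]
/// ```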
- fn domain_separation_tag(&self, context: &[u8], churp_id: u8) -> Vec { + fn domain_separation_tag(context: &[u8], runtime_id: &Namespace, churp_id: u8) -> Vec { let mut dst = context.to_vec(); dst.extend(RUNTIME_CONTEXT_SEPARATOR); - dst.extend(&self.runtime_id.0); + dst.extend(runtime_id.0); dst.extend(CHURP_CONTEXT_SEPARATOR); dst.extend(&[churp_id]); dst @@ -1154,8 +1162,7 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; let shareholder = self.get_shareholder(status.id, status.handoff)?; let point = shareholder.switch_point(&x); let point = scalar_to_bytes(&point); @@ -1183,8 +1190,7 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; let handoff = self.get_handoff(status.next_handoff)?; let shareholder = handoff.get_reduced_shareholder()?; let point = shareholder.switch_point(&x); @@ -1208,8 +1214,7 @@ impl Handler for Instance { self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; - let dst = self.domain_separation_tag(ENCODE_SHAREHOLDER_CONTEXT, status.id); - let x = encode_shareholder::(&node_id.0, &dst)?; + let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; let kind = Self::handoff_kind(&status); let dealer = self.get_dealer(status.next_handoff)?; let share = dealer.make_share(x, kind); @@ -1232,8 +1237,7 @@ impl Handler for Instance { self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; let shareholder = self.get_shareholder(status.id, status.handoff)?; - let dst = self.domain_separation_tag(ENCODE_SGX_POLICY_KEY_ID_CONTEXT, status.id); - let point = shareholder.make_key_share::(&req.key_id.0, &dst)?; + let point = shareholder.make_key_share::(&req.key_id.0, &self.sgx_policy_key_id_dst)?; Ok((&point).into()) } From 0c8497614a0ebf4fc782b29ad9ec192759bc3d5d Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Fri, 19 Jul 2024 12:09:43 +0200 Subject: [PATCH 5/8] keymanager/src/churp: Move runtime id verification --- keymanager/src/churp/handler.rs | 179 ++++++++++++++------------------ 1 file changed, 78 insertions(+), 101 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index c9f368214d3..e7e6ab7e169 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -365,9 +365,16 @@ impl Churp { } } - fn get_instance(&self, churp_id: u8) -> Result> { - let mut instances = self.instances.lock().unwrap(); + fn get_instance( + &self, + churp_id: u8, + runtime_id: Namespace, + ) -> Result> { + if self.runtime_id != runtime_id { + return Err(Error::RuntimeMismatch.into()); + } + let mut instances = self.instances.lock().unwrap(); if let Some(instance) = instances.get(&churp_id) { return Ok(instance.clone()); } @@ -392,7 +399,8 @@ impl Churp { impl Handler for Churp { fn verification_matrix(&self, req: &QueryRequest) -> Result> { - self.get_instance(req.id)?.verification_matrix(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.verification_matrix(req) } fn share_reduction_switch_point( @@ -400,8 +408,8 @@ impl Handler for Churp { ctx: 
&RpcContext, req: &QueryRequest, ) -> Result> { - self.get_instance(req.id)? - .share_reduction_switch_point(ctx, req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.share_reduction_switch_point(ctx, req) } fn share_distribution_switch_point( @@ -409,8 +417,8 @@ impl Handler for Churp { ctx: &RpcContext, req: &QueryRequest, ) -> Result> { - self.get_instance(req.id)? - .share_distribution_switch_point(ctx, req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.share_distribution_switch_point(ctx, req) } fn bivariate_share( @@ -418,7 +426,8 @@ impl Handler for Churp { ctx: &RpcContext, req: &QueryRequest, ) -> Result { - self.get_instance(req.id)?.bivariate_share(ctx, req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.bivariate_share(ctx, req) } fn sgx_policy_key_share( @@ -426,31 +435,38 @@ impl Handler for Churp { ctx: &RpcContext, req: &KeyShareRequest, ) -> Result { - self.get_instance(req.id)?.sgx_policy_key_share(ctx, req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.sgx_policy_key_share(ctx, req) } fn init(&self, req: &HandoffRequest) -> Result { - self.get_instance(req.id)?.init(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.init(req) } fn share_reduction(&self, req: &FetchRequest) -> Result { - self.get_instance(req.id)?.share_reduction(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.share_reduction(req) } fn share_distribution(&self, req: &FetchRequest) -> Result { - self.get_instance(req.id)?.share_distribution(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.share_distribution(req) } fn proactivization(&self, req: &FetchRequest) -> Result { - self.get_instance(req.id)?.proactivization(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.proactivization(req) } fn confirmation(&self, req: &HandoffRequest) -> Result { - self.get_instance(req.id)?.confirmation(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.confirmation(req) } fn finalize(&self, req: &HandoffRequest) -> Result<()> { - self.get_instance(req.id)?.finalize(req) + let instance = self.get_instance(req.id, req.runtime_id)?; + instance.finalize(req) } } @@ -562,7 +578,7 @@ impl Instance { // Fetch from the host node. if node_id == self.node_id { - let shareholder = self.get_shareholder(status.id, status.handoff)?; + let shareholder = self.get_shareholder(status.handoff)?; let point = shareholder.switch_point(&x); if handoff.needs_verification_matrix()? { @@ -579,13 +595,8 @@ impl Instance { if handoff.needs_verification_matrix()? { // The remote verification matrix needs to be verified. - let vm = block_on(client.churp_verification_matrix(status.id, status.handoff))?; - let checksum = Self::checksum_verification_matrix_bytes( - &vm, - self.runtime_id, - status.id, - status.handoff, - ); + let vm = block_on(client.churp_verification_matrix(self.churp_id, status.handoff))?; + let checksum = self.checksum_verification_matrix_bytes(&vm, status.handoff); let status_checksum = status.checksum.ok_or(Error::InvalidHandoff)?; // Should never happen. if checksum != status_checksum { return Err(Error::InvalidVerificationMatrixChecksum.into()); @@ -597,7 +608,7 @@ impl Instance { } let point = block_on(client.churp_share_reduction_point( - status.id, + self.churp_id, status.next_handoff, self.node_id, ))?; @@ -631,7 +642,7 @@ impl Instance { // Fetch from the remote node. 
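// Pinning the client to a single node ensures the switch point is
// requested from that specific node; the received point is presumably
// verified by the handoff against the verification matrix when added.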
client.set_nodes(vec![node_id]); let point = block_on(client.churp_share_distribution_point( - status.id, + self.churp_id, status.next_handoff, self.node_id, ))?; @@ -667,16 +678,15 @@ impl Instance { // Fetch from the remote node. client.set_nodes(vec![node_id]); - let share = - block_on(client.churp_bivariate_share(status.id, status.next_handoff, self.node_id))?; + let share = block_on(client.churp_bivariate_share( + self.churp_id, + status.next_handoff, + self.node_id, + ))?; // The remote verification matrix needs to be verified. - let checksum = Self::checksum_verification_matrix_bytes( - &share.verification_matrix, - self.runtime_id, - status.id, - status.next_handoff, - ); + let checksum = self + .checksum_verification_matrix_bytes(&share.verification_matrix, status.next_handoff); let application = status .applications .get(&node_id) @@ -691,12 +701,8 @@ impl Instance { handoff.add_bivariate_share(&x, verifiable_share) } - /// Returns the shareholder for the specified scheme and handoff epoch. - fn get_shareholder( - &self, - churp_id: u8, - epoch: EpochTime, - ) -> Result>> { + /// Returns the shareholder for the specified handoff epoch. + fn get_shareholder(&self, epoch: EpochTime) -> Result>> { // Check the memory first. Make sure to lock the new shareholders // so that we don't create two shareholders for the same handoff. let mut shareholders = self.shareholders.lock().unwrap(); @@ -710,7 +716,7 @@ impl Instance { // host has cleared the storage. let share = self .storage - .load_secret_share(churp_id, epoch) + .load_secret_share(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares. let share = match share { @@ -720,12 +726,13 @@ impl Instance { // succeeded as it might have been confirmed while we were away. let share = self .storage - .load_next_secret_share(churp_id, epoch) + .load_next_secret_share(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares. // If the share is valid, copy it. if let Some(share) = share.as_ref() { - self.storage.store_secret_share(share, churp_id, epoch)?; + self.storage + .store_secret_share(share, self.churp_id, epoch)?; } share @@ -896,7 +903,7 @@ impl Instance { let handoff = Handoff::new(threshold, me, shareholders, kind)?; if kind == HandoffKind::CommitteeUnchanged { - let shareholder = self.get_shareholder(self.churp_id, status.handoff)?; + let shareholder = self.get_shareholder(status.handoff)?; handoff.set_shareholder(shareholder)?; } @@ -921,17 +928,8 @@ impl Instance { /// Verifies parameters of the last successfully completed handoff against /// the latest status. - fn verify_last_handoff( - &self, - churp_id: u8, - runtime_id: Namespace, - epoch: EpochTime, - ) -> Result { - if self.runtime_id != runtime_id { - return Err(Error::RuntimeMismatch.into()); - } - - let status = self.churp_state.status(self.runtime_id, churp_id)?; + fn verify_last_handoff(&self, epoch: EpochTime) -> Result { + let status = self.churp_state.status(self.runtime_id, self.churp_id)?; if status.handoff != epoch { return Err(Error::HandoffMismatch.into()); } @@ -941,17 +939,8 @@ impl Instance { /// Verifies parameters of the next handoff against the latest status /// and checks whether the handoff is in progress. 
- fn verify_next_handoff( - &self, - churp_id: u8, - runtime_id: Namespace, - epoch: EpochTime, - ) -> Result { - if self.runtime_id != runtime_id { - return Err(Error::RuntimeMismatch.into()); - } - - let status = self.churp_state.status(self.runtime_id, churp_id)?; + fn verify_next_handoff(&self, epoch: EpochTime) -> Result { + let status = self.churp_state.status(self.runtime_id, self.churp_id)?; if status.next_handoff != epoch { return Err(Error::HandoffMismatch.into()); } @@ -1070,28 +1059,22 @@ impl Instance { /// Computes the checksum of the verification matrix. fn checksum_verification_matrix( + &self, matrix: &VerificationMatrix, - runtime_id: Namespace, - churp_id: u8, epoch: EpochTime, ) -> Hash where G: Group + GroupEncoding, { - Self::checksum_verification_matrix_bytes(&matrix.to_bytes(), runtime_id, churp_id, epoch) + self.checksum_verification_matrix_bytes(&matrix.to_bytes(), epoch) } /// Computes the checksum of the verification matrix bytes. - fn checksum_verification_matrix_bytes( - bytes: &Vec, - runtime_id: Namespace, - churp_id: u8, - epoch: EpochTime, - ) -> Hash { + fn checksum_verification_matrix_bytes(&self, bytes: &Vec, epoch: EpochTime) -> Hash { let mut checksum = [0u8; 32]; let mut f = KMac::new_kmac256(bytes, CHECKSUM_VERIFICATION_MATRIX_CUSTOM); - f.update(&runtime_id.0); - f.update(&[churp_id]); + f.update(&self.runtime_id.0); + f.update(&[self.churp_id]); f.update(&epoch.to_le_bytes()); f.finalize(&mut checksum); Hash(checksum) @@ -1130,9 +1113,9 @@ impl Instance { impl Handler for Instance { fn verification_matrix(&self, req: &QueryRequest) -> Result> { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_last_handoff(req.epoch)?; let shareholder = match status.suite_id { - SuiteId::NistP384Sha3_384 => self.get_shareholder(req.id, req.epoch)?, + SuiteId::NistP384Sha3_384 => self.get_shareholder(req.epoch)?, }; let vm = shareholder .verifiable_share() @@ -1147,7 +1130,7 @@ impl Handler for Instance { ctx: &RpcContext, req: &QueryRequest, ) -> Result> { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let kind = Self::handoff_kind(&status); if !matches!(kind, HandoffKind::CommitteeChanged) { @@ -1163,7 +1146,7 @@ impl Handler for Instance { self.verify_km_enclave(ctx, &status.policy)?; let x = encode_shareholder::(&node_id.0, &self.shareholder_dst)?; - let shareholder = self.get_shareholder(status.id, status.handoff)?; + let shareholder = self.get_shareholder(status.handoff)?; let point = shareholder.switch_point(&x); let point = scalar_to_bytes(&point); @@ -1175,7 +1158,7 @@ impl Handler for Instance { ctx: &RpcContext, req: &QueryRequest, ) -> Result> { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let kind = Self::handoff_kind(&status); if !matches!(kind, HandoffKind::CommitteeChanged) { @@ -1204,7 +1187,7 @@ impl Handler for Instance { ctx: &RpcContext, req: &QueryRequest, ) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; if !status.applications.contains_key(node_id) { @@ -1232,20 +1215,16 @@ impl Handler for Instance { ctx: &RpcContext, req: &KeyShareRequest, ) -> Result { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + let status = 
self.verify_last_handoff(req.epoch)?; self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; - let shareholder = self.get_shareholder(status.id, status.handoff)?; + let shareholder = self.get_shareholder(status.handoff)?; let point = shareholder.make_key_share::(&req.key_id.0, &self.sgx_policy_key_id_dst)?; Ok((&point).into()) } fn init(&self, req: &HandoffRequest) -> Result { - if self.runtime_id != req.runtime_id { - return Err(Error::RuntimeMismatch.into()); - } - let status = self.churp_state.status(self.runtime_id, req.id)?; if status.next_handoff != req.epoch { return Err(Error::HandoffMismatch.into()); @@ -1267,8 +1246,7 @@ impl Handler for Instance { // Fetch verification matrix and compute its checksum. let matrix = dealer.verification_matrix(); - let checksum = - Self::checksum_verification_matrix(matrix, self.runtime_id, req.id, req.epoch); + let checksum = self.checksum_verification_matrix(matrix, req.epoch); // Prepare response and sign it with RAK. let application = ApplicationRequest { @@ -1289,7 +1267,7 @@ impl Handler for Instance { } fn share_reduction(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let handoff = self.get_or_create_handoff(&status)?; let client = self.key_manager_client(&status, false)?; @@ -1299,7 +1277,7 @@ impl Handler for Instance { } fn share_distribution(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let handoff = self.get_handoff(status.next_handoff)?; let client = self.key_manager_client(&status, true)?; let f = |node_id| { @@ -1309,7 +1287,7 @@ impl Handler for Instance { } fn proactivization(&self, req: &FetchRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; let handoff = self.get_or_create_handoff(&status)?; let client = self.key_manager_client(&status, true)?; let f = |node_id| self.fetch_bivariate_share(node_id, &status, &handoff, &client); @@ -1317,7 +1295,7 @@ impl Handler for Instance { } fn confirmation(&self, req: &HandoffRequest) -> Result { - let status = self.verify_next_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_next_handoff(req.epoch)?; if !status.applications.contains_key(&self.node_id) { return Err(Error::ApplicationNotSubmitted.into()); @@ -1330,20 +1308,19 @@ impl Handler for Instance { // Before overwriting the next secret share, make sure it was copied // and used to construct the last shareholder. let _ = self - .get_shareholder(status.id, status.handoff) + .get_shareholder(status.handoff) .map(Some) .or_else(|err| ignore_error(err, Error::ShareholderNotFound))?; // Ignore if we don't have the correct share. // Always persist the secret share before sending confirmation. self.storage - .store_next_secret_share(share, status.id, status.next_handoff)?; + .store_next_secret_share(share, self.churp_id, status.next_handoff)?; // Prepare response and sign it with RAK. 
let vm = share.verification_matrix(); - let checksum = - Self::checksum_verification_matrix(vm, self.runtime_id, status.id, status.next_handoff); + let checksum = self.checksum_verification_matrix(vm, status.next_handoff); let confirmation = ConfirmationRequest { - id: status.id, + id: self.churp_id, runtime_id: self.runtime_id, epoch: status.next_handoff, checksum, @@ -1360,7 +1337,7 @@ impl Handler for Instance { } fn finalize(&self, req: &HandoffRequest) -> Result<()> { - let status = self.verify_last_handoff(req.id, req.runtime_id, req.epoch)?; + let status = self.verify_last_handoff(req.epoch)?; // Move the shareholder if the handoff was completed. let handoff = self.get_handoff(status.handoff); @@ -1375,7 +1352,7 @@ impl Handler for Instance { let shareholder = handoff.get_full_shareholder()?; let share = shareholder.verifiable_share(); self.storage - .store_secret_share(share, status.id, status.handoff)?; + .store_secret_share(share, self.churp_id, status.handoff)?; self.add_shareholder(shareholder, status.handoff); } From b9437655a1ae28faeded0b90a78d86738e326718 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Fri, 19 Jul 2024 13:43:13 +0200 Subject: [PATCH 6/8] keymanager/src/churp: Rename init method to apply --- go/keymanager/churp/rpc.go | 4 ++-- go/worker/keymanager/churp.go | 2 +- keymanager/src/churp/handler.rs | 12 ++++++------ keymanager/src/churp/methods.rs | 8 ++++---- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/go/keymanager/churp/rpc.go b/go/keymanager/churp/rpc.go index 7c3143099cf..5623133b2cb 100644 --- a/go/keymanager/churp/rpc.go +++ b/go/keymanager/churp/rpc.go @@ -6,8 +6,8 @@ import ( ) var ( - // RPCMethodInit is the name of the `init` method. - RPCMethodInit = "churp/init" + // RPCMethodApply is the name of the `apply` method. + RPCMethodApply = "churp/apply" // RPCMethodShareReduction is the name of the `share_reduction` method. RPCMethodShareReduction = "churp/share_reduction" diff --git a/go/worker/keymanager/churp.go b/go/worker/keymanager/churp.go index 266d66b106e..0e06eae83f1 100644 --- a/go/worker/keymanager/churp.go +++ b/go/worker/keymanager/churp.go @@ -491,7 +491,7 @@ func (s *submissionScheduler) trySubmitApplication(ctx context.Context, status * Epoch: status.NextHandoff, } var rsp churp.SignedApplicationRequest - if err := timeCallEnclaveLocal(ctx, s.kmWorker, churp.RPCMethodInit, req, &rsp, status); err != nil { + if err := timeCallEnclaveLocal(ctx, s.kmWorker, churp.RPCMethodApply, req, &rsp, status); err != nil { return fmt.Errorf("failed to generate verification matrix: %w", err) } diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index e7e6ab7e169..da236a10bb1 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -251,7 +251,7 @@ pub(crate) trait Handler { /// first one (dealing phase). /// /// This method must be called locally. - fn init(&self, req: &HandoffRequest) -> Result; + fn apply(&self, req: &HandoffRequest) -> Result; /// Tries to fetch switch points for share reduction from the given nodes. 
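/// Fetching fans out per node: each node ID is mapped through a closure
/// that queries that node, and the `fetch` helper gathers the per-node
/// results into a `FetchResponse`. In sketch form:
///
/// ```ignore
/// let f = |node_id| self.fetch_share_reduction_switch_point(node_id, &status, &handoff, &client);
/// fetch(f, &req.node_ids)
/// ```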
/// @@ -439,9 +439,9 @@ impl Handler for Churp { instance.sgx_policy_key_share(ctx, req) } - fn init(&self, req: &HandoffRequest) -> Result { + fn apply(&self, req: &HandoffRequest) -> Result { let instance = self.get_instance(req.id, req.runtime_id)?; - instance.init(req) + instance.apply(req) } fn share_reduction(&self, req: &FetchRequest) -> Result { @@ -1224,8 +1224,8 @@ impl Handler for Instance { Ok((&point).into()) } - fn init(&self, req: &HandoffRequest) -> Result { - let status = self.churp_state.status(self.runtime_id, req.id)?; + fn apply(&self, req: &HandoffRequest) -> Result { + let status = self.churp_state.status(self.runtime_id, self.churp_id)?; if status.next_handoff != req.epoch { return Err(Error::HandoffMismatch.into()); } @@ -1250,7 +1250,7 @@ impl Handler for Instance { // Prepare response and sign it with RAK. let application = ApplicationRequest { - id: req.id, + id: self.churp_id, runtime_id: self.runtime_id, epoch: req.epoch, checksum, diff --git a/keymanager/src/churp/methods.rs b/keymanager/src/churp/methods.rs index 88e3951c75e..7bc7c6f2255 100644 --- a/keymanager/src/churp/methods.rs +++ b/keymanager/src/churp/methods.rs @@ -8,8 +8,8 @@ use oasis_core_runtime::enclave_rpc::{ use crate::churp::{Churp, Handler}; -/// Name of the `init` method. -pub const METHOD_INIT: &str = "churp/init"; +/// Name of the `apply` method. +pub const METHOD_APPLY: &str = "churp/apply"; /// Name of the `share_reduction` method. pub const METHOD_SHARE_REDUCTION: &str = "churp/share_reduction"; /// Name of the `share_distribution` method. @@ -74,10 +74,10 @@ impl RpcHandler for Churp { /* Local queries */ RpcMethod::new( RpcMethodDescriptor { - name: METHOD_INIT.to_string(), + name: METHOD_APPLY.to_string(), kind: RpcKind::LocalQuery, }, - move |_ctx: &_, req: &_| self.init(req), + move |_ctx: &_, req: &_| self.apply(req), ), RpcMethod::new( RpcMethodDescriptor { From b4b68f15e7dc10057e1599669615f0cf078d691f Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Sun, 21 Jul 2024 16:34:44 +0200 Subject: [PATCH 7/8] keymanager/src/churp: Load dealer and shareholder on startup --- keymanager/src/churp/handler.rs | 358 +++++++++++++++++++------------- 1 file changed, 217 insertions(+), 141 deletions(-) diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index da236a10bb1..028c552d97c 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -370,15 +370,18 @@ impl Churp { churp_id: u8, runtime_id: Namespace, ) -> Result> { + // Ensure runtime_id matches. if self.runtime_id != runtime_id { return Err(Error::RuntimeMismatch.into()); } + // Return the instance if it exists. let mut instances = self.instances.lock().unwrap(); if let Some(instance) = instances.get(&churp_id) { return Ok(instance.clone()); } + // Create a new instance based on the suite type. let status = self.churp_state.status(self.runtime_id, churp_id)?; let instance = match status.suite_id { SuiteId::NistP384Sha3_384 => Instance::::new( @@ -390,6 +393,11 @@ impl Churp { self.policies.clone(), ), }; + + // Load secret shares and bivariate share. + instance.init(&status)?; + + // Store the new instance. let instance = Arc::new(instance); instances.insert(churp_id, instance.clone()); @@ -497,6 +505,9 @@ struct Instance { registry_state: RegistryState, /// Shareholders with secret shares for completed handoffs. + /// + /// The map may also contain shareholders for failed or unfinished + /// handoffs, so always verify if the handoff succeeded in the consensus. 
shareholders: Mutex>>>, /// Dealer of bivariate shares for the next handoff. dealer: Mutex>>, @@ -514,6 +525,7 @@ struct Instance { } impl Instance { + /// Creates a new CHURP instance. pub fn new( churp_id: u8, node_id: PublicKey, @@ -562,6 +574,20 @@ impl Instance { } } + /// Initializes the instance by loading the shareholder for the last + /// successfully completed handoff, as well as the shareholder and + /// the dealer for the upcoming handoff, if they are available. + pub fn init(&self, status: &Status) -> Result<()> { + let checksum = status + .applications + .get(&self.node_id) + .map(|app| app.checksum); + + self.load_shareholder(status.handoff)?; + self.load_next_shareholder(status.next_handoff)?; + self.load_dealer(status.next_handoff, checksum) + } + /// Tries to fetch switch point for share reduction from the given node. pub fn fetch_share_reduction_switch_point( &self, @@ -701,35 +727,50 @@ impl Instance { handoff.add_bivariate_share(&x, verifiable_share) } - /// Returns the shareholder for the specified handoff epoch. + /// Returns the shareholder for the given epoch. fn get_shareholder(&self, epoch: EpochTime) -> Result>> { - // Check the memory first. Make sure to lock the new shareholders - // so that we don't create two shareholders for the same handoff. + let shareholders = self.shareholders.lock().unwrap(); + shareholders + .get(&epoch) + .cloned() + .ok_or(Error::ShareholderNotFound.into()) + } + + /// Adds a shareholder for the given epoch. + fn add_shareholder(&self, shareholder: Arc>, epoch: EpochTime) { let mut shareholders = self.shareholders.lock().unwrap(); + shareholders.insert(epoch, shareholder); + } - if let Some(shareholder) = shareholders.get(&epoch) { - return Ok(shareholder.clone()); + /// Keeps only the shareholders that belong to one of the given epochs. + fn keep_shareholders(&self, epochs: &[EpochTime]) { + let mut shareholders = self.shareholders.lock().unwrap(); + shareholders.retain(|epoch, _| epochs.contains(epoch)); + } + + /// Loads the shareholder from local storage for the given epoch. + fn load_shareholder(&self, epoch: EpochTime) -> Result<()> { + // Skip if no handoffs have been completed so far. + if epoch == 0 { + return Ok(()); } - // Fetch shareholder's secret share from the local storage and use it - // to restore the internal state upon restarts, unless a malicious - // host has cleared the storage. let share = self .storage .load_secret_share(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares. + // If the secret share is not available, check if the next handoff + // succeeded as it might have been confirmed while we were away. let share = match share { Some(share) => Some(share), None => { - // If the secret share is not available, check if the next handoff - // succeeded as it might have been confirmed while we were away. let share = self .storage .load_next_secret_share(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares. - // If the share is valid, copy it. + // // Back up the secret share, if it is valid. if let Some(share) = share.as_ref() { self.storage .store_secret_share(share, self.churp_id, epoch)?; @@ -738,115 +779,137 @@ impl Instance { share } }; - let share = share.ok_or(Error::ShareholderNotFound)?; + + self.verify_and_add_shareholder(share, epoch) + } + + /// Loads the next shareholder from local storage for the given epoch. 
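+ ///
+ /// Unlike `load_shareholder`, this method never falls back to a backup
+ /// copy: the next secret share either exists for the given epoch or
+ /// loading is silently skipped.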
+ fn load_next_shareholder(&self, epoch: EpochTime) -> Result<()> { + let share = self + .storage + .load_next_secret_share(self.churp_id, epoch) + .or_else(|err| ignore_error(err, Error::InvalidSecretShare))?; // Ignore previous shares. + + self.verify_and_add_shareholder(share, epoch) + } + + fn verify_and_add_shareholder( + &self, + share: Option>, + epoch: EpochTime, + ) -> Result<()> { + let share = match share { + Some(share) => share, + None => return Ok(()), + }; // Verify that the host hasn't changed. - let x = encode_shareholder::(&self.node_id.0, &self.shareholder_dst)?; - if share.secret_share().coordinate_x() != &x { + let me = encode_shareholder::(&self.node_id.0, &self.shareholder_dst)?; + if share.secret_share().coordinate_x() != &me { return Err(Error::InvalidHost.into()); } // Create a new shareholder. let shareholder = Arc::new(Shareholder::from(share)); - shareholders.insert(epoch, shareholder.clone()); - Ok(shareholder) - } + // Store the shareholder. + self.add_shareholder(shareholder, epoch); - /// Adds a shareholder for the specified scheme and handoff epoch. - fn add_shareholder(&self, shareholder: Arc>, epoch: EpochTime) { - let mut shareholders = self.shareholders.lock().unwrap(); - shareholders.insert(epoch, shareholder); + Ok(()) } - /// Removes shareholders that belong to a handoff that happened at or before - /// the given epoch. - fn remove_shareholders(&self, max_epoch: EpochTime) { - let mut shareholders = self.shareholders.lock().unwrap(); - shareholders.retain(|&epoch, _| epoch > max_epoch); + /// Returns the dealer for the given epoch. + fn get_dealer(&self, epoch: EpochTime) -> Result>> { + let dealer_guard = self.dealer.lock().unwrap(); + + let dealer_info = match dealer_guard.as_ref() { + Some(dealer_info) => dealer_info, + None => return Err(Error::DealerNotFound.into()), + }; + if dealer_info.epoch != epoch { + return Err(Error::DealerNotFound.into()); + } + + Ok(dealer_info.dealer.clone()) } - /// Returns the dealer for the specified handoff epoch. - fn get_dealer(&self, epoch: EpochTime) -> Result>> { - self._get_or_create_dealer(epoch, None, None) + /// Adds a dealer for the given epoch. If a dealer is already set, + /// it will be overwritten. + fn add_dealer(&self, dealer: Arc>, epoch: EpochTime) { + let mut dealer_guard = self.dealer.lock().unwrap(); + *dealer_guard = Some(DealerInfo { epoch, dealer }); } - /// Returns the dealer for the specified handoff epoch. If the dealer - /// doesn't exist, a new one is created. - fn get_or_create_dealer( + /// Creates a new dealer for the given epoch. + /// + /// If a dealer for the same or any other epoch already exists, it will + /// be removed, its bivariate polynomial overwritten, and permanently + /// lost. + /// + /// Note that since the host controls the local storage, he can restart + /// the enclave to create multiple dealers for the same epoch and then + /// replace the last backup with a bivariate polynomial from a dealer + /// of his choice. Therefore, it is essential to verify the bivariate + /// polynomial after loading or when deriving bivariate shares. + fn create_dealer( &self, epoch: EpochTime, threshold: u8, dealing_phase: bool, ) -> Result>> { - self._get_or_create_dealer(epoch, Some(threshold), Some(dealing_phase)) - } + // Create a new dealer. 
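+ // In the dealing phase the bivariate polynomial is sampled with a random
+ // constant term, which contributes to the shared secret; in later
+ // handoffs the constant term is presumably zero so that proactivization
+ // leaves the secret unchanged.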
+ let dealer = Dealer::create(threshold, dealing_phase, &mut OsRng)?; + let dealer = Arc::new(dealer); - fn _get_or_create_dealer( - &self, - epoch: EpochTime, - threshold: Option, - dealing_phase: Option, - ) -> Result>> { - // Check the memory first. Make sure to lock the dealer so that we - // don't create two dealers for the same handoff. - let mut dealer_guard = self.dealer.lock().unwrap(); + // Encrypt and store the polynomial in case of a restart. + let polynomial = dealer.bivariate_polynomial(); + self.storage + .store_bivariate_polynomial(polynomial, self.churp_id, epoch)?; - if let Some(dealer_info) = dealer_guard.as_ref() { - match epoch.cmp(&dealer_info.epoch) { - cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()), - cmp::Ordering::Equal => return Ok(dealer_info.dealer.clone()), - cmp::Ordering::Greater => (), - } + // Store the dealer. + self.add_dealer(dealer.clone(), epoch); + + Ok(dealer) + } + + /// Loads the dealer for the given epoch from the local storage and verifies + /// it against the provided checksum. + fn load_dealer(&self, epoch: EpochTime, checksum: Option) -> Result<()> { + // Skip if handoffs are disabled. + if epoch == HANDOFFS_DISABLED { + return Ok(()); } - // Check the local storage to ensure that only one secret bivariate - // polynomial is generated per handoff upon restarts, unless a malicious - // host has cleared the storage. + // Load untrusted polynomial. let polynomial = self .storage .load_bivariate_polynomial(self.churp_id, epoch) .or_else(|err| ignore_error(err, Error::InvalidBivariatePolynomial))?; // Ignore previous dealers. - let dealer = match polynomial { - Some(bp) => { - // Polynomial verification is redundant as encryption prevents - // tampering, while consensus ensures that the group ID remains - // unchanged and that polynomial dimensions remain consistent - // for any given pair of churp ID and handoff. - Dealer::from(bp) - } - None => { - // Skip dealer creation if not needed. - let threshold = threshold.ok_or(Error::DealerNotFound)?; - let dealing_phase = dealing_phase.ok_or(Error::DealerNotFound)?; - - // The local storage is either empty or contains a polynomial - // from another handoff. It's time to prepare a new one. - // - // If the host has cleared the storage, other participants - // will detect the polynomial change because the checksum - // of the verification matrix in the submitted application - // will also change. - let dealer = Dealer::create(threshold, dealing_phase, &mut OsRng)?; - - // Encrypt and store the polynomial in case of a restart. - let polynomial = dealer.bivariate_polynomial(); - self.storage - .store_bivariate_polynomial(polynomial, self.churp_id, epoch)?; - - dealer - } + let polynomial = match polynomial { + Some(polynomial) => polynomial, + None => return Ok(()), }; - // Create a new dealer. - let dealer = Arc::new(dealer); - *dealer_guard = Some(DealerInfo { - epoch, - dealer: dealer.clone(), - }); + // Create untrusted dealer. + let dealer = Arc::new(Dealer::from(polynomial)); - Ok(dealer) + // Verify that the host hasn't created multiple dealers for the same + // epoch and replaced the polynomial that was used to prepare + // the application. + if let Some(checksum) = checksum { + let verification_matrix = dealer.verification_matrix(); + let computed_checksum = self.checksum_verification_matrix(verification_matrix, epoch); + + if checksum != computed_checksum { + return Err(Error::InvalidBivariatePolynomial.into()); + } + } + + // Store the dealer. 
+ self.add_dealer(dealer, epoch); + + Ok(()) } /// Removes the dealer if it belongs to a handoff that occurred @@ -860,37 +923,33 @@ impl Instance { } } - /// Returns the handoff for the specified handoff epoch. + /// Returns the handoff for the given epoch. fn get_handoff(&self, epoch: EpochTime) -> Result>> { - self._get_or_create_handoff(epoch, None) - } + let handoff_guard = self.handoff.lock().unwrap(); - /// Returns the handoff for the next handoff epoch. If the handoff doesn't - /// exist, a new one is created. - fn get_or_create_handoff(&self, status: &Status) -> Result>> { - self._get_or_create_handoff(status.next_handoff, Some(status)) + let handoff_info = handoff_guard + .as_ref() + .filter(|hi| hi.epoch == epoch) + .ok_or(Error::HandoffNotFound)?; + + Ok(handoff_info.handoff.clone()) } - fn _get_or_create_handoff( - &self, - epoch: EpochTime, - status: Option<&Status>, - ) -> Result>> { - // Check the memory first. Make sure to lock the handoff so that we - // don't create two handoffs for the same epoch. + /// Creates a handoff for the next handoff epoch. If a handoff already + /// exists, the existing one is returned. + fn get_or_create_handoff(&self, status: &Status) -> Result>> { + // Make sure to lock the handoff so that we don't create two handoffs + // for the same epoch. let mut handoff_guard = self.handoff.lock().unwrap(); if let Some(handoff_info) = handoff_guard.as_ref() { - match epoch.cmp(&handoff_info.epoch) { + match status.next_handoff.cmp(&handoff_info.epoch) { cmp::Ordering::Less => return Err(Error::InvalidHandoff.into()), cmp::Ordering::Equal => return Ok(handoff_info.handoff.clone()), cmp::Ordering::Greater => (), } } - // Skip handoff creation if not needed. - let status = status.ok_or(Error::HandoffNotFound)?; - // Create a new handoff. let threshold = status.threshold; let me = encode_shareholder::(&self.node_id.0, &self.shareholder_dst)?; @@ -901,15 +960,18 @@ impl Instance { } let kind = Self::handoff_kind(status); let handoff = Handoff::new(threshold, me, shareholders, kind)?; + let handoff = Arc::new(handoff); + // If the committee hasn't changed, we need the latest shareholder + // to randomize its share. if kind == HandoffKind::CommitteeUnchanged { let shareholder = self.get_shareholder(status.handoff)?; handoff.set_shareholder(shareholder)?; } - let handoff = Arc::new(handoff); + // Store the handoff. *handoff_guard = Some(HandoffInfo { - epoch, + epoch: status.next_handoff, handoff: handoff.clone(), }); @@ -1192,7 +1254,12 @@ impl Handler for Instance { let node_id = req.node_id.as_ref().ok_or(Error::NotAuthenticated)?; if !status.applications.contains_key(node_id) { return Err(Error::NotInCommittee.into()); - } + }; + + let application = status + .applications + .get(&self.node_id) + .ok_or(Error::NotInCommittee)?; self.verify_node_id(ctx, node_id)?; self.verify_km_enclave(ctx, &status.policy)?; @@ -1204,6 +1271,15 @@ impl Handler for Instance { let share = (&share).into(); let verification_matrix = dealer.verification_matrix().to_bytes(); + // Verify that the host hasn't created multiple dealers for the same + // epoch and replaced the polynomial that was used to prepare + // the application. 
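+ // The checksum is recomputed over the serialized matrix with KMAC, keyed
+ // to the runtime ID, churp ID and next handoff epoch, and compared to the
+ // checksum this node committed to in its application.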
+ let computed_checksum = + self.checksum_verification_matrix_bytes(&verification_matrix, status.next_handoff); + if application.checksum != computed_checksum { + return Err(Error::InvalidBivariatePolynomial.into()); + } + Ok(EncodedVerifiableSecretShare { share, verification_matrix, @@ -1236,13 +1312,15 @@ impl Handler for Instance { return Err(Error::ApplicationSubmitted.into()); } + // Ensure application is submitted one epoch before the next handoff. let now = self.beacon_state.epoch()?; if status.next_handoff != now + 1 { return Err(Error::ApplicationsClosed.into()); } + // Create a new dealer. let dealing_phase = status.committee.is_empty(); - let dealer = self.get_or_create_dealer(req.epoch, status.threshold, dealing_phase)?; + let dealer = self.create_dealer(status.next_handoff, status.threshold, dealing_phase)?; // Fetch verification matrix and compute its checksum. let matrix = dealer.verification_matrix(); @@ -1252,7 +1330,7 @@ impl Handler for Instance { let application = ApplicationRequest { id: self.churp_id, runtime_id: self.runtime_id, - epoch: req.epoch, + epoch: status.next_handoff, checksum, }; let body = cbor::to_vec(application.clone()); @@ -1288,7 +1366,10 @@ impl Handler for Instance { fn proactivization(&self, req: &FetchRequest) -> Result { let status = self.verify_next_handoff(req.epoch)?; - let handoff = self.get_or_create_handoff(&status)?; + let handoff = match Self::handoff_kind(&status) { + HandoffKind::CommitteeChanged => self.get_handoff(status.next_handoff)?, + _ => self.get_or_create_handoff(&status)?, + }; let client = self.key_manager_client(&status, true)?; let f = |node_id| self.fetch_bivariate_share(node_id, &status, &handoff, &client); fetch(f, &req.node_ids) @@ -1301,21 +1382,21 @@ impl Handler for Instance { return Err(Error::ApplicationNotSubmitted.into()); } + // Fetch the next shareholder and its secret share. let handoff = self.get_handoff(status.next_handoff)?; let shareholder = handoff.get_full_shareholder()?; let share = shareholder.verifiable_share(); - // Before overwriting the next secret share, make sure it was copied - // and used to construct the last shareholder. - let _ = self - .get_shareholder(status.handoff) - .map(Some) - .or_else(|err| ignore_error(err, Error::ShareholderNotFound))?; // Ignore if we don't have the correct share. - - // Always persist the secret share before sending confirmation. + // Back up the secret share before sending confirmation. self.storage .store_next_secret_share(share, self.churp_id, status.next_handoff)?; + // Store the shareholder. Observe that we are adding the shareholder + // before the consensus has confirmed that the handoff was completed. + // This is fine, as we always verify the handoff epoch before fetching + // a shareholder. + self.add_shareholder(shareholder.clone(), status.next_handoff); + // Prepare response and sign it with RAK. let vm = share.verification_matrix(); let checksum = self.checksum_verification_matrix(vm, status.next_handoff); @@ -1339,32 +1420,27 @@ impl Handler for Instance { fn finalize(&self, req: &HandoffRequest) -> Result<()> { let status = self.verify_last_handoff(req.epoch)?; - // Move the shareholder if the handoff was completed. 
- let handoff = self.get_handoff(status.handoff); - let handoff = match handoff { - Ok(handoff) => Some(handoff), - Err(err) => match err.downcast_ref::() { - Some(err) if err == &Error::HandoffNotFound => None, - _ => return Err(err), - }, - }; - if let Some(handoff) = handoff { - let shareholder = handoff.get_full_shareholder()?; - let share = shareholder.verifiable_share(); - self.storage - .store_secret_share(share, self.churp_id, status.handoff)?; - self.add_shareholder(shareholder, status.handoff); - } - - // Cleanup. - let max_epoch = status.handoff.saturating_sub(1); - self.remove_shareholders(max_epoch); + // Cleanup shareholders by removing those for past or failed handoffs. + let epochs = [status.handoff, status.next_handoff]; + self.keep_shareholders(&epochs); + // Cleaning up dealers and handoffs is optional, + // as they are overwritten during the next handoff. let max_epoch = status.next_handoff.saturating_sub(1); self.remove_dealer(max_epoch); self.remove_handoff(max_epoch); - Ok(()) + // Fetch the last shareholder and its secret share. + let shareholder = match self.get_shareholder(status.handoff) { + Ok(shareholder) => shareholder, + Err(_) => return Ok(()), // Not found. + }; + let share = shareholder.verifiable_share(); + + // Back up the secret share. This operation will be a no-op + // if the handoff failed, as the last shareholder hasn't changed. + self.storage + .store_secret_share(share, self.churp_id, status.handoff) } } From ba2b014d8f0b5837bcc525e2f2590526f5d129c1 Mon Sep 17 00:00:00 2001 From: Peter Nose Date: Tue, 23 Jul 2024 08:23:08 +0200 Subject: [PATCH 8/8] keymanager/src/churp: Serve key shares to lagging clients --- .changelog/5784.trivial.md | 1 + keymanager/src/churp/handler.rs | 36 ++++++++++++++++++++++++++------- keymanager/src/churp/state.rs | 18 +++++++++++++++++ 3 files changed, 48 insertions(+), 7 deletions(-) create mode 100644 .changelog/5784.trivial.md diff --git a/.changelog/5784.trivial.md b/.changelog/5784.trivial.md new file mode 100644 index 00000000000..c82d68e762c --- /dev/null +++ b/.changelog/5784.trivial.md @@ -0,0 +1 @@ +keymanager/src/churp: Serve key shares to lagging clients diff --git a/keymanager/src/churp/handler.rs b/keymanager/src/churp/handler.rs index 028c552d97c..d61eddb7082 100644 --- a/keymanager/src/churp/handler.rs +++ b/keymanager/src/churp/handler.rs @@ -97,6 +97,10 @@ const RUNTIME_CONTEXT_SEPARATOR: &[u8] = b" for runtime "; /// on the churp ID. const CHURP_CONTEXT_SEPARATOR: &[u8] = b" for churp "; +/// The number of blocks a remote client is allowed to be behind when querying +/// past key shares. +const ALLOWED_BLOCKS_BEHIND: u64 = 5; + /// Represents information about a dealer. struct DealerInfo { /// The epoch during which this dealer is active. @@ -742,10 +746,12 @@ impl Instance { shareholders.insert(epoch, shareholder); } - /// Keeps only the shareholders that belong to one of the given epochs. - fn keep_shareholders(&self, epochs: &[EpochTime]) { + /// Keeps only the shareholder for the given epoch and the shareholder + /// preceding that one. + fn clean_shareholders(&self, epoch: EpochTime) { let mut shareholders = self.shareholders.lock().unwrap(); - shareholders.retain(|epoch, _| epochs.contains(epoch)); + let second_last = shareholders.keys().filter(|&&e| e < epoch).max().cloned(); + shareholders.retain(|&e, _| e == epoch || Some(e) == second_last); } /// Loads the shareholder from local storage for the given epoch. 
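///
/// Missing shares are tolerated: if neither the confirmed share nor the
/// backup of the next share is present (e.g. because the host cleared the
/// storage), the instance simply comes up without a shareholder for that
/// epoch.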
@@ -1291,12 +1297,27 @@ impl Handler for Instance { ctx: &RpcContext, req: &KeyShareRequest, ) -> Result { - let status = self.verify_last_handoff(req.epoch)?; + let status = self.churp_state.status(self.runtime_id, self.churp_id)?; + let status = if status.handoff != req.epoch { + // Allow querying past key shares if the client is a few blocks behind. + self.churp_state + .status_before(self.runtime_id, self.churp_id, ALLOWED_BLOCKS_BEHIND)? + } else { + status + }; + if status.handoff != req.epoch { + return Err(Error::HandoffMismatch.into()); + } + + // Note that querying past key shares can fail at this point + // if the policy has changed. self.verify_rt_enclave(ctx, &status.policy, &req.key_runtime_id)?; + // Prepare key share. let shareholder = self.get_shareholder(status.handoff)?; let point = shareholder.make_key_share::(&req.key_id.0, &self.sgx_policy_key_id_dst)?; + Ok((&point).into()) } @@ -1420,9 +1441,10 @@ impl Handler for Instance { fn finalize(&self, req: &HandoffRequest) -> Result<()> { let status = self.verify_last_handoff(req.epoch)?; - // Cleanup shareholders by removing those for past or failed handoffs. - let epochs = [status.handoff, status.next_handoff]; - self.keep_shareholders(&epochs); + // Keep only the last two shareholders. The second-last shareholder + // could be removed after a few blocks, as we need it only to serve + // clients that are lagging behind. + self.clean_shareholders(status.handoff); // Cleaning up dealers and handoffs is optional, // as they are overwritten during the next handoff. diff --git a/keymanager/src/churp/state.rs b/keymanager/src/churp/state.rs index 4fb9e8ca8ed..bb8982e8748 100644 --- a/keymanager/src/churp/state.rs +++ b/keymanager/src/churp/state.rs @@ -35,4 +35,22 @@ impl State { Ok(status) } + + /// Returns CHURP status before the given number of blocks. + pub fn status_before( + &self, + runtime_id: Namespace, + churp_id: u8, + blocks: u64, + ) -> Result { + let height = block_on(self.consensus_verifier.latest_height())?; + let height = height.saturating_sub(blocks).max(1); + let consensus_state = block_on(self.consensus_verifier.state_at(height))?; + let churp_state = ChurpState::new(&consensus_state); + let status = churp_state + .status(runtime_id, churp_id)? + .ok_or(Error::StatusNotPublished)?; + + Ok(status) + } }
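// A condensed sketch of the lagging-client fallback above (`pick_status` is
// a hypothetical method on `Instance`; it mirrors the logic in
// `sgx_policy_key_share`, with error plumbing unchanged): the latest status
// is tried first, and only on an epoch mismatch is the status from a few
// blocks back consulted.
fn pick_status(&self, epoch: EpochTime) -> Result<Status> {
    let status = self.churp_state.status(self.runtime_id, self.churp_id)?;
    let status = if status.handoff != epoch {
        // Serve clients that are a few blocks behind from an older status.
        self.churp_state
            .status_before(self.runtime_id, self.churp_id, ALLOWED_BLOCKS_BEHIND)?
    } else {
        status
    };
    if status.handoff != epoch {
        return Err(Error::HandoffMismatch.into());
    }
    Ok(status)
}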