Skip to content

Commit

Permalink
python: Add blame mode (identifiable aborts)
Browse files Browse the repository at this point in the history
  • Loading branch information
real-or-random committed Oct 18, 2024
1 parent 53959ce commit 4ee666e
Show file tree
Hide file tree
Showing 6 changed files with 179 additions and 32 deletions.
5 changes: 3 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -754,7 +754,7 @@ Perform a participant's first step of a ChillDKG session.
#### participant\_step2

```python
def participant_step2(hostseckey: bytes, state1: ParticipantState1, cmsg1: CoordinatorMsg1) -> Tuple[ParticipantState2, ParticipantMsg2]
def participant_step2(hostseckey: bytes, state1: ParticipantState1, cmsg1: CoordinatorMsg1, blame_rec: Optional[encpedpop.BlameRecord]) -> Tuple[ParticipantState2, ParticipantMsg2]
```

Perform a participant's second step of a ChillDKG session.
Expand All @@ -779,6 +779,7 @@ Perform a participant's second step of a ChillDKG session.
*Raises*:

- `SecKeyError` - If the length of `hostseckey` is not 32 bytes.
- `FaultyCoordinatorError` - If the blame data provided by the coordinator
  is inconsistent. (FIXME: complete this description.)
- `FaultyParticipantError` - If `cmsg1` is invalid. This can happen if
another participant has sent an invalid message to the coordinator,
or if the coordinator has sent an invalid `cmsg1`.
Expand Down Expand Up @@ -837,7 +838,7 @@ of the success of the DKG session by presenting recovery data to us.
#### coordinator\_step1

```python
def coordinator_step1(pmsgs1: List[ParticipantMsg1], params: SessionParams) -> Tuple[CoordinatorState, CoordinatorMsg1]
def coordinator_step1(pmsgs1: List[ParticipantMsg1], params: SessionParams, blame: bool = True) -> Tuple[CoordinatorState, CoordinatorMsg1, List[Optional[encpedpop.BlameRecord]]]
```

Perform the coordinator's first step of a ChillDKG session.
Expand Down
18 changes: 13 additions & 5 deletions python/chilldkg_ref/chilldkg.py
Original file line number Diff line number Diff line change
Expand Up @@ -430,6 +430,7 @@ def participant_step2(
hostseckey: bytes,
state1: ParticipantState1,
cmsg1: CoordinatorMsg1,
blame_rec: Optional[encpedpop.BlameRecord],
) -> Tuple[ParticipantState2, ParticipantMsg2]:
"""Perform a participant's second step of a ChillDKG session.
Expand All @@ -448,6 +449,7 @@ def participant_step2(
Raises:
SecKeyError: If the length of `hostseckey` is not 32 bytes.
FIXME
FaultyParticipantError: If `cmsg1` is invalid. This can happen if
another participant has sent an invalid message to the coordinator,
or if the coordinator has sent an invalid `cmsg1`.
Expand All @@ -468,6 +470,7 @@ def participant_step2(
deckey=hostseckey,
cmsg=enc_cmsg,
enc_secshare=enc_secshares[idx],
blame_rec=blame_rec,
)
# Include the enc_shares in eq_input to ensure that participants agree on all
# shares, which in turn ensures that they have the right recovery data.
Expand Down Expand Up @@ -531,8 +534,8 @@ class CoordinatorState(NamedTuple):


def coordinator_step1(
pmsgs1: List[ParticipantMsg1], params: SessionParams
) -> Tuple[CoordinatorState, CoordinatorMsg1]:
pmsgs1: List[ParticipantMsg1], params: SessionParams, blame: bool = True
) -> Tuple[CoordinatorState, CoordinatorMsg1, List[Optional[encpedpop.BlameRecord]]]:
"""Perform the coordinator's first step of a ChillDKG session.
Arguments:
Expand All @@ -555,14 +558,19 @@ def coordinator_step1(
params_validate(params)
(hostpubkeys, t) = params

enc_cmsg, enc_dkg_output, eq_input, enc_secshares = encpedpop.coordinator_step(
pmsgs=[pmsg1.enc_pmsg for pmsg1 in pmsgs1], t=t, enckeys=hostpubkeys
enc_cmsg, enc_dkg_output, eq_input, enc_secshares, blame_recs = (
encpedpop.coordinator_step(
pmsgs=[pmsg1.enc_pmsg for pmsg1 in pmsgs1],
t=t,
enckeys=hostpubkeys,
blame=blame,
)
)
eq_input += b"".join([bytes_from_int(int(share)) for share in enc_secshares])
dkg_output = DKGOutput._make(enc_dkg_output) # Convert to chilldkg.DKGOutput type
state = CoordinatorState(params, eq_input, dkg_output)
cmsg1 = CoordinatorMsg1(enc_cmsg, enc_secshares)
return state, cmsg1
return state, cmsg1, blame_recs


def coordinator_finalize(
Expand Down
67 changes: 61 additions & 6 deletions python/chilldkg_ref/encpedpop.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from typing import Tuple, List, NamedTuple
from typing import Tuple, List, NamedTuple, Optional

from secp256k1proto.secp256k1 import Scalar
from secp256k1proto.secp256k1 import Scalar, GE
from secp256k1proto.ecdh import ecdh_libsecp256k1
from secp256k1proto.keys import pubkey_gen_plain
from secp256k1proto.util import int_from_bytes
Expand Down Expand Up @@ -159,6 +159,11 @@ class CoordinatorMsg(NamedTuple):
pubnonces: List[bytes]


class BlameRecord(NamedTuple):
    """Per-recipient data for identifying a faulty sender after an invalid share.

    Built by the coordinator in `coordinator_step` for each recipient: one
    entry per sending participant. The recipient decrypts the partial shares
    in `participant_step2` and forwards them (with the partial public shares)
    to `simplpedpop` so that a bad share can be attributed to a specific sender.
    """

    # Encrypted partial secret shares destined for this recipient, one per sender.
    enc_partial_secshares: List[Scalar]
    # Corresponding partial public shares, one per sender.
    partial_pubshares: List[GE]


###
### Participant
###
Expand Down Expand Up @@ -204,7 +209,7 @@ def participant_step1(
# case someone derives secnonce differently.
simpl_seed = derive_simpl_seed(seed, pubnonce, enc_context)

simpl_state, simpl_pmsg, shares = simplpedpop.participant_step1(
simpl_state, simpl_pmsg, shares, _ = simplpedpop.participant_step1(
simpl_seed, t, n, idx
)
assert len(shares) == n
Expand All @@ -223,9 +228,11 @@ def participant_step2(
deckey: bytes,
cmsg: CoordinatorMsg,
enc_secshare: Scalar,
blame_rec: Optional[BlameRecord] = None,
) -> Tuple[simplpedpop.DKGOutput, bytes]:
simpl_state, pubnonce, enckeys, idx = state
simpl_cmsg, pubnonces = cmsg
n = len(enckeys)

reported_pubnonce = pubnonces[idx]
if reported_pubnonce != pubnonce:
Expand All @@ -235,8 +242,27 @@ def participant_step2(
secshare = decrypt_sum(
deckey, enckeys[idx], pubnonces, enc_context, idx, enc_secshare
)

if blame_rec is not None:
enc_partial_secshares, partial_pubshares = blame_rec
partial_secshares = [
decrypt(
deckey,
enckeys[idx],
pubnonces[i],
enc_context,
idx,
i,
enc_partial_secshares[i],
)
for i in range(n)
]
simpl_blame_rec = simplpedpop.BlameRecord(partial_secshares, partial_pubshares)
else:
simpl_blame_rec = None

dkg_output, eq_input = simplpedpop.participant_step2(
simpl_state, simpl_cmsg, secshare
simpl_state, simpl_cmsg, secshare, simpl_blame_rec
)
eq_input += b"".join(enckeys) + b"".join(pubnonces)
return dkg_output, eq_input
Expand All @@ -251,7 +277,14 @@ def coordinator_step(
pmsgs: List[ParticipantMsg],
t: int,
enckeys: List[bytes],
) -> Tuple[CoordinatorMsg, simplpedpop.DKGOutput, bytes, List[Scalar]]:
blame: bool = True,
) -> Tuple[
CoordinatorMsg,
simplpedpop.DKGOutput,
bytes,
List[Scalar],
List[Optional[BlameRecord]],
]:
n = len(enckeys)
if n != len(pmsgs):
raise ValueError
Expand All @@ -267,6 +300,22 @@ def coordinator_step(
enc_secshares = [
Scalar.sum(*([pmsg.enc_shares[i] for pmsg in pmsgs])) for i in range(n)
]

blame_recs: List[Optional[BlameRecord]]
if blame:
enc_partial_secshares = [
[pmsg.enc_shares[i] for pmsg in pmsgs] for i in range(n)
]
partial_pubshares = [
[pmsg.simpl_pmsg.com.pubshare(i) for pmsg in pmsgs] for i in range(n)
]
blame_recs = [
BlameRecord(enc_partial_secshares[i], partial_pubshares[i])
for i in range(n)
]
else:
blame_recs = [None for i in range(n)]

eq_input += b"".join(enckeys) + b"".join(pubnonces)
# In ChillDKG, the coordinator needs to broadcast the entire enc_secshares
# array to all participants. But in pure EncPedPop, the coordinator needs to
Expand All @@ -277,4 +326,10 @@ def coordinator_step(
# chilldkg.coordinator_step can pick it up. Implementations of pure
# EncPedPop will need to decide how to transmit enc_secshares[i] to
# participant i; we leave this unspecified.
return CoordinatorMsg(simpl_cmsg, pubnonces), dkg_output, eq_input, enc_secshares
return (
CoordinatorMsg(simpl_cmsg, pubnonces),
dkg_output,
eq_input,
enc_secshares,
blame_recs,
)
62 changes: 57 additions & 5 deletions python/chilldkg_ref/simplpedpop.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
from secrets import token_bytes as random_bytes
from typing import List, NamedTuple, NewType, Tuple, Optional
from typing import List, NamedTuple, NewType, Tuple, Optional, cast

from secp256k1proto.bip340 import schnorr_sign, schnorr_verify
from secp256k1proto.secp256k1 import GE, Scalar
Expand Down Expand Up @@ -62,6 +62,11 @@ def to_bytes(self) -> bytes:
) + b"".join(self.pops)


class BlameRecord(NamedTuple):
    """Unencrypted per-recipient data used to identify a faulty sender.

    Consumed in `participant_step2` when verification of the summed secshare
    fails: each partial_secshares[i] is verified against partial_pubshares[i]
    to pinpoint which sender (or the coordinator) is at fault.
    """

    # Partial secret shares for this recipient, one per sending participant.
    partial_secshares: List[Scalar]
    # Corresponding partial public shares, one per sending participant.
    partial_pubshares: List[GE]


###
### Other common definitions
###
Expand Down Expand Up @@ -100,7 +105,7 @@ class ParticipantState(NamedTuple):


def participant_step1(
seed: bytes, t: int, n: int, idx: int
seed: bytes, t: int, n: int, idx: int, blame: bool = True
) -> Tuple[
ParticipantState,
ParticipantMsg,
Expand All @@ -125,7 +130,14 @@ def participant_step1(
com_to_secret = com.commitment_to_secret()
msg = ParticipantMsg(com, pop)
state = ParticipantState(t, n, idx, com_to_secret)
return state, msg, partial_secshares_from_me

partial_pubshares_from_me: List[Optional[GE]]
if blame:
partial_pubshares_from_me = [com.pubshare(i) for i in range(n)]
else:
partial_pubshares_from_me = [None for i in range(n)]

return state, msg, partial_secshares_from_me, partial_pubshares_from_me


# Helper function to prepare the secret side inputs for participant idx's
Expand All @@ -151,6 +163,7 @@ def participant_step2(
state: ParticipantState,
cmsg: CoordinatorMsg,
secshare: Scalar,
blame_rec: Optional[BlameRecord] = None,
) -> Tuple[DKGOutput, bytes]:
t, n, idx, com_to_secret = state
coms_to_secrets, sum_coms_to_nonconst_terms, pops = cmsg
Expand Down Expand Up @@ -181,8 +194,46 @@ def participant_step2(
sum_coms = assemble_sum_coms(coms_to_secrets, sum_coms_to_nonconst_terms, n)
threshold_pubkey = sum_coms.commitment_to_secret()
pubshares = [sum_coms.pubshare(i) for i in range(n)]

if not VSSCommitment.verify_secshare(secshare, pubshares[idx]):
raise FaultyParticipantError(None, "Received invalid secshare")
if blame_rec is None:
raise FaultyParticipantError(None, "Received invalid secshare")
else:
# TODO Extract function
partial_secshares, partial_pubshares = blame_rec

# TODO Should we include these checks? They're superficial but diligent:
# Alternatively, solve the redundancy by sending only n-1
# FIXME Reconsider this check. That's not a faulty coordinator: the
# coordinator is not involved in the secshares in simplpedpop. Perhaps
# we can move this check to encpedpop (and perform it on the
# encrypted shares). Or additionally raise something else here.
# How can this even fail in a run of pure simplpedpop? I think only
# if the prepare function was wrong.
if Scalar.sum(*partial_secshares) != secshare:
raise FaultyCoordinatorError(
"Sum of partial secshares not equal to secshare"
)
# FIXME Similar, but this can fail if either the coordinator was
# wrong, or the prepare function.
if GE.sum(*partial_pubshares) != pubshares[idx]:
raise FaultyCoordinatorError(
"Sum of partial pubshares not equal to pubshare"
)
for i in range(n):
if not VSSCommitment.verify_secshare(
partial_secshares[i], partial_pubshares[i]
):
if i != idx:
raise FaultyParticipantError(
i, "Participant sent invalid partial secshare"
)
else:
# We are not faulty, so it must be the coordinator.
raise FaultyCoordinatorError(
"Would blame myself" # TODO better message
)
assert False

dkg_output = DKGOutput(
secshare.to_bytes(),
Expand All @@ -201,7 +252,7 @@ def participant_step2(
def coordinator_step(
pmsgs: List[ParticipantMsg], t: int, n: int
) -> Tuple[CoordinatorMsg, DKGOutput, bytes]:
# Sum the commitments to the i-th coefficients for i > 0
# Sum the commitments to the i-th coefficients for i > 0 # FIXME
#
# This procedure is introduced by Pedersen in Section 5.1 of
# 'Non-Interactive and Information-Theoretic Secure Verifiable Secret
Expand All @@ -221,6 +272,7 @@ def coordinator_step(
sum_coms = assemble_sum_coms(coms_to_secrets, sum_coms_to_nonconst_terms, n)
threshold_pubkey = sum_coms.commitment_to_secret()
pubshares = [sum_coms.pubshare(i) for i in range(n)]

dkg_output = DKGOutput(
None,
threshold_pubkey.to_bytes_compressed(),
Expand Down
Loading

0 comments on commit 4ee666e

Please sign in to comment.