Another chunk of documentation for beta-1 (#952)
* Another chunk of documentation for beta-1

* fix test
ktoso authored Jun 6, 2022
1 parent 9bd94ba commit 7d30855
Showing 11 changed files with 273 additions and 74 deletions.
2 changes: 1 addition & 1 deletion Sources/DistributedActors/ActorAddress.swift
@@ -56,7 +56,7 @@ extension ClusterSystem {
     ///
     /// Storing an `ActorID` instead of the concrete `DistributedActor` is also a common pattern to avoid
     /// retaining the actor, while retaining the ability to know if we have already stored this actor or not.
-    /// For example, in a lobby system, we might need to only store actor identifiers, and ``LifecycleWatch/watchTermination``
+    /// For example, in a lobby system, we might need to only store actor identifiers, and ``LifecycleWatch/watchTermination(of:whenTerminated:file:line:)``
     /// some actors, in order to not retain them in the lobby actor itself. If the same actor messages us again to "join",
     /// we would already know that we have already seen it, and could handle it joining again in some other way.
     ///
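The doc fix above spells out the full ``LifecycleWatch/watchTermination(of:whenTerminated:file:line:)`` reference. A minimal sketch of the lobby pattern that comment describes might look as follows; the `Lobby` and `Player` actors, the stored `Set<ActorID>`, and the exact shape of the `whenTerminated` closure are illustrative assumptions, not code from this commit:

```swift
import Distributed
import DistributedActors

// Hypothetical guest type for the sketch.
distributed actor Player {
    typealias ActorSystem = ClusterSystem
}

// Hypothetical lobby: stores only ActorIDs rather than Player references,
// so players are not retained, and watches them so that terminated players
// are forgotten instead of leaking their IDs.
distributed actor Lobby: LifecycleWatch {
    typealias ActorSystem = ClusterSystem

    var seenPlayers: Set<ClusterSystem.ActorID> = []

    distributed func join(player: Player) {
        guard self.seenPlayers.insert(player.id).inserted else {
            return // already seen this player; a re-join could be handled differently
        }
        // Watch without retaining: when the player terminates, drop its ID.
        _ = self.watchTermination(of: player) { id in
            self.seenPlayers.remove(id)
        }
    }
}
```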
5 changes: 5 additions & 0 deletions Sources/DistributedActors/Cluster/Cluster+Member.swift
@@ -256,6 +256,9 @@ extension Cluster.MemberStatus {
 }
 
 extension Cluster.MemberStatus {
+    /// Compares two member status in terms of their "order" in the lifecycle of a member.
+    ///
+    /// Ordering of membership status is as follows: `.joining` < `.up` < `.leaving` < `.down` < `.removed`.
     public static func < (lhs: Cluster.MemberStatus, rhs: Cluster.MemberStatus) -> Bool {
         switch lhs {
         case .joining:
@@ -293,10 +296,12 @@ extension Cluster {
 }
 
 extension Cluster.MemberReachability {
+    /// Returns `true` if the reachability is `.reachable`.
     public var isReachable: Bool {
         self == .reachable
     }
 
+    /// Returns `true` if the reachability is `.unreachable`.
     public var isUnreachable: Bool {
         self == .unreachable
     }
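The new doc comments on `Cluster.MemberStatus` and `Cluster.MemberReachability` describe a forward-only ordering and two boolean accessors. A quick illustrative snippet, using only names visible in this diff:

```swift
let status: Cluster.MemberStatus = .joining

// Statuses only move "forward" through the lifecycle, so `<` compares progress:
// .joining < .up < .leaving < .down < .removed
if status < .up {
    // this member has not finished joining yet
}

let reachability: Cluster.MemberReachability = .reachable
assert(reachability.isReachable)
assert(!reachability.isUnreachable)
```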
20 changes: 12 additions & 8 deletions Sources/DistributedActors/Cluster/Cluster+Membership.swift
@@ -18,23 +18,25 @@ import Foundation
 // MARK: Cluster Membership
 
 extension Cluster {
-    /// `Membership` represents the set of members of this cluster.
+    /// Represents the set of members of this cluster.
     ///
     /// Membership changes are driven by nodes joining and leaving the cluster.
-    /// Leaving the cluster may be graceful or triggered by a `FailureDetector`.
+    /// Leaving the cluster may be graceful or triggered by a ``FailureDetector``.
     ///
     /// ### Replacement (Unique)Nodes
-    /// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. `Node`),
-    /// with another member; It MAY join "over" an existing node and will immediately cause the previous node to be marked `MemberStatus.down`
+    /// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. ``Node``),
+    /// with another member; It MAY join "over" an existing node and will immediately cause the previous node to be marked ``Cluster/MemberStatus/down``
     /// upon such transition. Such situations can take place when an actor system node is killed and started on the same host+port immediately,
     /// and attempts to connect to the same cluster as its previous "incarnation". Such situation is called a replacement, and by the assumption
     /// of that it should not be possible to run many nodes on exact same host+port the previous node is immediately ejected and marked down.
     ///
     /// ### Member state transitions
-    /// Members can only move "forward" along their status lifecycle, refer to `Cluster.MemberStatus` docs for a diagram of legal transitions.
+    /// Members can only move "forward" along their status lifecycle, refer to ``Cluster/MemberStatus``
+    /// docs for a diagram of legal transitions.
     public struct Membership: ExpressibleByArrayLiteral {
         public typealias ArrayLiteralElement = Cluster.Member
 
+        /// Initialize an empty membership (with no members).
         public static var empty: Cluster.Membership {
             .init(members: [])
         }
@@ -45,6 +47,7 @@ extension Cluster {
         /// when operator issued moves are induced e.g. "> down 1.1.1.1:3333", since operators do not care about `NodeID` most of the time.
         internal var _members: [UniqueNode: Cluster.Member]
 
+        /// Initialize a membership with the given members.
         public init(members: [Cluster.Member]) {
             self._members = Dictionary(minimumCapacity: members.count)
             for member in members {
@@ -172,9 +175,9 @@ extension Cluster {
         // ==== ------------------------------------------------------------------------------------------------------------
         // MARK: Leaders
 
-        /// ## Leaders
-        /// A leader is a specific `Member` which was selected to fulfil the leadership role for the time being.
-        /// A leader returning a non-nil value, guarantees that the same Member existing as part of this `Membership` as well (non-members cannot be leaders).
+        /// A leader is a specific ``Cluster/Member`` which was selected to fulfil the leadership role for the time being.
+        ///
+        /// A leader returning a non-nil value, guarantees that the same ``Cluster/Member`` existing as part of this ``Cluster/Membership`` as well (non-members cannot be leaders).
         ///
         /// Clustering offered by this project does not really designate any "special" nodes; yet sometimes a leader may be useful to make decisions more efficient or centralized.
         /// Leaders may be selected using various strategies, the most simple one being sorting members by their addresses and picking the "lowest".
@@ -221,6 +224,7 @@ extension Cluster {
 }
 
 // Implementation notes: Membership/Member equality
+//
 // Membership equality is special, as it manually DOES take into account the Member's states (status, reachability),
 // whilst the Member equality by itself does not. This is somewhat ugly, however it allows us to perform automatic
 // seen table owner version updates whenever "the membership has changed." We may want to move away from this and make
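To make the `Membership` surface above concrete, here is a small hedged sketch; `empty`, `init(members:)`, and the array-literal conformance appear in this diff, while the `Cluster.Member(node:status:)` initializer shape is an assumption:

```swift
func exampleMembership(node: UniqueNode) -> Cluster.Membership {
    // Start from the empty membership shown above.
    var membership: Cluster.Membership = .empty

    // Assumed Member initializer shape, for illustration only:
    let member = Cluster.Member(node: node, status: .up)

    // Membership is ExpressibleByArrayLiteral with Cluster.Member elements:
    membership = [member]
    return membership
}
```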
1 change: 0 additions & 1 deletion Sources/DistributedActors/Cluster/ClusterControl.swift
@@ -128,7 +128,6 @@ public struct ClusterControl {
     /// pair however are accepted to join the cluster (though technically this is a newly joining node, not really a "re-join").
     ///
     /// - SeeAlso: `Cluster.MemberStatus` for more discussion about what the `.down` status implies.
-
     public func down(node: Node) {
         self.ref.tell(.command(.downCommand(node)))
     }
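For reference, `down(node:)` above is fire-and-forget: it only `tell`s the cluster shell a down command. A hedged usage sketch, where the `Node(systemName:host:port:)` initializer and the `system.cluster` accessor are assumptions about the library's public surface rather than part of this diff:

```swift
func downStaleNode(in system: ClusterSystem) {
    // Issue a down command for a node we have decided is gone for good.
    let stale = Node(systemName: "System", host: "127.0.0.1", port: 8228)
    system.cluster.down(node: stale)
}
```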
@@ -239,7 +239,6 @@ public distributed actor OpLogDistributedReceptionist: DistributedReceptionist,
         return ps
     }
 
-    // FIXME(swift 6): initializer must become async
     init(settings: ReceptionistSettings, system: ActorSystem) async {
         self.actorSystem = system
         self.instrumentation = system.settings.instrumentation.makeReceptionistInstrumentation()
@@ -284,32 +283,32 @@
 // MARK: Receptionist API impl
 
 extension OpLogDistributedReceptionist: LifecycleWatch {
-    public nonisolated func register<Guest>(
+    public nonisolated func checkIn<Guest>(
         _ guest: Guest,
         with key: DistributedReception.Key<Guest>
     ) async where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem {
         await self.whenLocal { __secretlyKnownToBeLocal in
-            await __secretlyKnownToBeLocal._register(guest, with: key)
+            await __secretlyKnownToBeLocal._checkIn(guest, with: key)
         }
     }
 
-    // 'local' implementation of register
-    private func _register<Guest>(
+    // 'local' implementation of checkIn
+    private func _checkIn<Guest>(
         _ guest: Guest,
         with key: DistributedReception.Key<Guest>
     ) async where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem {
-        self.log.warning("distributed receptionist: register(\(guest), with: \(key)")
+        self.log.warning("distributed receptionist: checkIn(\(guest), with: \(key)")
         let key = key.asAnyKey
 
         let id = guest.id
         let ref = actorSystem._resolveUntyped(id: guest.id)
 
         guard id._isLocal || (id.uniqueNode == actorSystem.cluster.uniqueNode) else {
             self.log.warning("""
-            Actor [\(guest.id)] attempted to register under key [\(key)], with NOT-local receptionist! \
-            Actors MUST register with their local receptionist in today's Receptionist implementation.
+            Actor [\(guest.id)] attempted to checkIn under key [\(key)], with NOT-local receptionist! \
+            Actors MUST checkIn with their local receptionist in today's Receptionist implementation.
             """)
-            return // TODO: This restriction could be lifted; perhaps we can direct the register to the right node?
+            return // TODO: This restriction could be lifted; perhaps we can direct the checkIn to the right node?
         }
 
         let sequenced: OpLog<ReceptionistOp>.SequencedOp =
@@ -343,8 +342,8 @@ extension OpLogDistributedReceptionist: LifecycleWatch {
         // TODO: reply "registered"?
     }
 
-    public nonisolated func subscribe<Guest>(
-        to key: DistributedReception.Key<Guest>
+    public nonisolated func listing<Guest>(
+        of key: DistributedReception.Key<Guest>
     ) async -> DistributedReception.GuestListing<Guest>
         where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem
     {
@@ -359,7 +358,7 @@ extension OpLogDistributedReceptionist: LifecycleWatch {
         return r
     }
 
-    func _subscribe(
+    func _listing(
         subscription: AnyDistributedReceptionListingSubscription
     ) {
         if self.storage.addSubscription(key: subscription.key, subscription: subscription) {
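The rename in this file is from `register`/`subscribe` to `checkIn`/`listing`. A sketch of how a caller might use the renamed API; the `system.receptionist` accessor, the string-literal key declaration, and iterating `GuestListing` as an `AsyncSequence` are assumptions, not shown in this diff:

```swift
import Distributed
import DistributedActors

distributed actor Worker {
    typealias ActorSystem = ClusterSystem
}

extension DistributedReception.Key {
    // Assumed pattern for declaring a reception key for a guest type.
    static var workers: DistributedReception.Key<Worker> { "workers" }
}

func runReceptionExample(system: ClusterSystem) async {
    let worker = Worker(actorSystem: system)

    // Formerly `register(_:with:)`:
    await system.receptionist.checkIn(worker, with: .workers)

    // Formerly `subscribe(to:)`; assuming GuestListing is an AsyncSequence:
    for await checkedIn in await system.receptionist.listing(of: .workers) {
        _ = checkedIn // react to each checked-in worker
        break
    }
}
```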
