
Commit 7d30855

Another chunk of documentation for beta-1 (#952)
* Another chunk of documentation for beta-1
* fix test
1 parent 9bd94ba commit 7d30855

11 files changed: +273 additions, -74 deletions


Sources/DistributedActors/ActorAddress.swift

Lines changed: 1 addition & 1 deletion
@@ -56,7 +56,7 @@ extension ClusterSystem {
 ///
 /// Storing an `ActorID` instead of the concrete `DistributedActor` is also a common pattern to avoid
 /// retaining the actor, while retaining the ability to know if we have already stored this actor or not.
-/// For example, in a lobby system, we might need to only store actor identifiers, and ``LifecycleWatch/watchTermination``
+/// For example, in a lobby system, we might need to only store actor identifiers, and ``LifecycleWatch/watchTermination(of:whenTerminated:file:line:)``
 /// some actors, in order to not retain them in the lobby actor itself. If the same actor messages us again to "join",
 /// we would already know that we have already seen it, and could handle it joining again in some other way.
 ///
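The doc comment above describes a lobby-style pattern: store only ActorIDs so the actors are not retained, and use lifecycle watching to forget them once they terminate. A minimal, hypothetical sketch of that pattern (the Lobby and Player actors are invented here, and the exact shape of the whenTerminated closure is an assumption, not taken from this commit):

distributed actor Player {
    typealias ActorSystem = ClusterSystem
}

distributed actor Lobby: LifecycleWatch {
    typealias ActorSystem = ClusterSystem

    /// Only identifiers are stored, so the lobby does not retain the player actors.
    var seenPlayers: Set<ClusterSystem.ActorID> = []

    distributed func join(player: Player) {
        guard !self.seenPlayers.contains(player.id) else {
            return // already seen this player; a re-join can be handled in some other way
        }
        self.seenPlayers.insert(player.id)

        // Clean up the stored ID once the player terminates (closure shape assumed).
        watchTermination(of: player) { id in
            self.seenPlayers.remove(id)
        }
    }
}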

Sources/DistributedActors/Cluster/Cluster+Member.swift

Lines changed: 5 additions & 0 deletions
@@ -256,6 +256,9 @@ extension Cluster.MemberStatus {
 }
 
 extension Cluster.MemberStatus {
+/// Compares two member status in terms of their "order" in the lifecycle of a member.
+///
+/// Ordering of membership status is as follows: `.joining` < `.up` < `.leaving` < `.down` < `.removed`.
 public static func < (lhs: Cluster.MemberStatus, rhs: Cluster.MemberStatus) -> Bool {
 switch lhs {
 case .joining:
@@ -293,10 +296,12 @@ extension Cluster {
 }
 
 extension Cluster.MemberReachability {
+/// Returns `true` if the reachability is `.reachable`.
 public var isReachable: Bool {
 self == .reachable
 }
 
+/// Returns `true` if the reachability is `.unreachable`.
 public var isUnreachable: Bool {
 self == .unreachable
 }
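These additions document the lifecycle ordering of member statuses and the reachability helpers. A small usage sketch (not part of this commit; it assumes a member: Cluster.Member value is at hand and that its status and reachability are exposed as properties):

if member.status < .down {
    // .joining, .up and .leaving all sort before .down, so this member has not been downed yet.
    print("\(member) is still participating in the cluster")
}

if member.reachability.isUnreachable {
    // The failure detector currently considers this member unreachable.
    print("\(member) is currently unreachable")
}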

Sources/DistributedActors/Cluster/Cluster+Membership.swift

Lines changed: 12 additions & 8 deletions
@@ -18,23 +18,25 @@ import Foundation
 // MARK: Cluster Membership
 
 extension Cluster {
-/// `Membership` represents the set of members of this cluster.
+/// Represents the set of members of this cluster.
 ///
 /// Membership changes are driven by nodes joining and leaving the cluster.
-/// Leaving the cluster may be graceful or triggered by a `FailureDetector`.
+/// Leaving the cluster may be graceful or triggered by a ``FailureDetector``.
 ///
 /// ### Replacement (Unique)Nodes
-/// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. `Node`),
-/// with another member; It MAY join "over" an existing node and will immediately cause the previous node to be marked `MemberStatus.down`
+/// A node (or member) is referred to as a "replacement" if it shares _the same_ protocol+host+address (i.e. ``Node``),
+/// with another member; It MAY join "over" an existing node and will immediately cause the previous node to be marked ``Cluster/MemberStatus/down``
 /// upon such transition. Such situations can take place when an actor system node is killed and started on the same host+port immediately,
 /// and attempts to connect to the same cluster as its previous "incarnation". Such situation is called a replacement, and by the assumption
 /// of that it should not be possible to run many nodes on exact same host+port the previous node is immediately ejected and marked down.
 ///
 /// ### Member state transitions
-/// Members can only move "forward" along their status lifecycle, refer to `Cluster.MemberStatus` docs for a diagram of legal transitions.
+/// Members can only move "forward" along their status lifecycle, refer to ``Cluster/MemberStatus``
+/// docs for a diagram of legal transitions.
 public struct Membership: ExpressibleByArrayLiteral {
 public typealias ArrayLiteralElement = Cluster.Member
 
+/// Initialize an empty membership (with no members).
 public static var empty: Cluster.Membership {
 .init(members: [])
 }
@@ -45,6 +47,7 @@ extension Cluster {
 /// when operator issued moves are induced e.g. "> down 1.1.1.1:3333", since operators do not care about `NodeID` most of the time.
 internal var _members: [UniqueNode: Cluster.Member]
 
+/// Initialize a membership with the given members.
 public init(members: [Cluster.Member]) {
 self._members = Dictionary(minimumCapacity: members.count)
 for member in members {
@@ -172,9 +175,9 @@ extension Cluster {
 // ==== ------------------------------------------------------------------------------------------------------------
 // MARK: Leaders
 
-/// ## Leaders
-/// A leader is a specific `Member` which was selected to fulfil the leadership role for the time being.
-/// A leader returning a non-nil value, guarantees that the same Member existing as part of this `Membership` as well (non-members cannot be leaders).
+/// A leader is a specific ``Cluster/Member`` which was selected to fulfil the leadership role for the time being.
+///
+/// A leader returning a non-nil value, guarantees that the same ``Cluster/Member`` existing as part of this ``Cluster/Membership`` as well (non-members cannot be leaders).
 ///
 /// Clustering offered by this project does not really designate any "special" nodes; yet sometimes a leader may be useful to make decisions more efficient or centralized.
 /// Leaders may be selected using various strategies, the most simple one being sorting members by their addresses and picking the "lowest".
@@ -221,6 +224,7 @@ extension Cluster {
 }
 
 // Implementation notes: Membership/Member equality
+//
 // Membership equality is special, as it manually DOES take into account the Member's states (status, reachability),
 // whilst the Member equality by itself does not. This is somewhat ugly, however it allows us to perform automatic
 // seen table owner version updates whenever "the membership has changed." We may want to move away from this and make
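Alongside the reworked type documentation, the Membership initializers gained doc comments. A brief sketch of the documented ways to construct a membership (not from this commit; someMember stands for a pre-existing Cluster.Member value):

var membership: Cluster.Membership = .empty               // no members at all
membership = Cluster.Membership(members: [someMember])    // seeded with already known members
let viaLiteral: Cluster.Membership = [someMember]         // using the ExpressibleByArrayLiteral conformance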

Sources/DistributedActors/Cluster/ClusterControl.swift

Lines changed: 0 additions & 1 deletion
@@ -128,7 +128,6 @@ public struct ClusterControl {
 /// pair however are accepted to join the cluster (though technically this is a newly joining node, not really a "re-join").
 ///
 /// - SeeAlso: `Cluster.MemberStatus` for more discussion about what the `.down` status implies.
-
 public func down(node: Node) {
 self.ref.tell(.command(.downCommand(node)))
 }
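The down(node:) call above is the operator-facing way to force a node out of the membership. A usage sketch (not from this commit; the Node initializer used here and the system.cluster accessor are assumptions for illustration):

let node = Node(systemName: "ClusterSystem", host: "10.0.0.7", port: 7337)
system.cluster.down(node: node)  // ask the cluster to mark that node as .down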

Sources/DistributedActors/Cluster/Reception/OperationLogDistributedReceptionist.swift

Lines changed: 11 additions & 12 deletions
@@ -239,7 +239,6 @@ public distributed actor OpLogDistributedReceptionist: DistributedReceptionist,
 return ps
 }
 
-// FIXME(swift 6): initializer must become async
 init(settings: ReceptionistSettings, system: ActorSystem) async {
 self.actorSystem = system
 self.instrumentation = system.settings.instrumentation.makeReceptionistInstrumentation()
@@ -284,32 +283,32 @@ public distributed actor OpLogDistributedReceptionist: DistributedReceptionist,
 // MARK: Receptionist API impl
 
 extension OpLogDistributedReceptionist: LifecycleWatch {
-public nonisolated func register<Guest>(
+public nonisolated func checkIn<Guest>(
 _ guest: Guest,
 with key: DistributedReception.Key<Guest>
 ) async where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem {
 await self.whenLocal { __secretlyKnownToBeLocal in
-await __secretlyKnownToBeLocal._register(guest, with: key)
+await __secretlyKnownToBeLocal._checkIn(guest, with: key)
 }
 }
 
-// 'local' implementation of register
-private func _register<Guest>(
+// 'local' implementation of checkIn
+private func _checkIn<Guest>(
 _ guest: Guest,
 with key: DistributedReception.Key<Guest>
 ) async where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem {
-self.log.warning("distributed receptionist: register(\(guest), with: \(key)")
+self.log.warning("distributed receptionist: checkIn(\(guest), with: \(key)")
 let key = key.asAnyKey
 
 let id = guest.id
 let ref = actorSystem._resolveUntyped(id: guest.id)
 
 guard id._isLocal || (id.uniqueNode == actorSystem.cluster.uniqueNode) else {
 self.log.warning("""
-Actor [\(guest.id)] attempted to register under key [\(key)], with NOT-local receptionist! \
-Actors MUST register with their local receptionist in today's Receptionist implementation.
+Actor [\(guest.id)] attempted to checkIn under key [\(key)], with NOT-local receptionist! \
+Actors MUST checkIn with their local receptionist in today's Receptionist implementation.
 """)
-return // TODO: This restriction could be lifted; perhaps we can direct the register to the right node?
+return // TODO: This restriction could be lifted; perhaps we can direct the checkIn to the right node?
 }
 
 let sequenced: OpLog<ReceptionistOp>.SequencedOp =
@@ -343,8 +342,8 @@ extension OpLogDistributedReceptionist: LifecycleWatch {
 // TODO: reply "registered"?
 }
 
-public nonisolated func subscribe<Guest>(
-to key: DistributedReception.Key<Guest>
+public nonisolated func listing<Guest>(
+of key: DistributedReception.Key<Guest>
 ) async -> DistributedReception.GuestListing<Guest>
 where Guest: DistributedActor, Guest.ActorSystem == ClusterSystem
 {
@@ -359,7 +358,7 @@ extension OpLogDistributedReceptionist: LifecycleWatch {
 return r
 }
 
-func _subscribe(
+func _listing(
 subscription: AnyDistributedReceptionListingSubscription
 ) {
 if self.storage.addSubscription(key: subscription.key, subscription: subscription) {
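The receptionist API is renamed in this file: actors now "check in" under a reception key, and interested parties consume a "listing" of that key. A rough usage sketch of the renamed calls (not from this commit; the Worker actor, the workers key parameter, and the system.receptionist accessor are assumptions for illustration):

distributed actor Worker {
    typealias ActorSystem = ClusterSystem
}

func discoverWorkers(system: ClusterSystem, workers: DistributedReception.Key<Worker>) async {
    // Create a worker and check it in with its local receptionist under the key.
    let worker = Worker(actorSystem: system)
    await system.receptionist.checkIn(worker, with: workers)

    // Consume the listing of everyone checked in under that key;
    // the stream keeps emitting as new workers check in.
    for await found in await system.receptionist.listing(of: workers) {
        print("discovered worker: \(found.id)")
    }
}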
