From d976c5ab4eb66f4f3b3b811ae0ce325a2adaa809 Mon Sep 17 00:00:00 2001 From: ain ghazal Date: Wed, 10 Jan 2024 17:57:21 +0100 Subject: [PATCH 1/2] ci: bump the version to 1.20 --- .github/workflows/build.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 28970c05..4d7df513 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: - name: setup go uses: actions/setup-go@v2 with: - go-version: '1.18' + go-version: '1.20' - name: Run short tests run: go test --short -cover ./vpn @@ -33,7 +33,7 @@ jobs: - name: setup go uses: actions/setup-go@v2 with: - go-version: '1.18' + go-version: '1.20' - name: Ensure coverage threshold run: make test-coverage-threshold @@ -44,7 +44,7 @@ jobs: - name: setup go uses: actions/setup-go@v2 with: - go-version: '1.18' + go-version: '1.20' - name: run integration tests run: go test -v ./tests/integration From 4d0ca1359e084e3ef46adcec527c60f8ec2efff7 Mon Sep 17 00:00:00 2001 From: Ain Ghazal <99027643+ainghazal@users.noreply.github.com> Date: Mon, 15 Jan 2024 14:38:58 +0100 Subject: [PATCH 2/2] refactor: introduce the packetmuxer layer (#49) This is the second commit in the series of incremental refactoring of the current minivpn tree. In this commit, we introduce the packet muxer, which is the layer just above the networkio. The packet muxer handles and routes data or control packets, and it also handles reset packets. As dependencies for `packetmuxer`, we're also introducing the `optional` and `session` packages. 
Reference issue: #47 --------- Co-authored-by: Simone Basso --- internal/optional/optional.go | 58 +++++ internal/packetmuxer/service.go | 288 +++++++++++++++++++++++ internal/session/datachannelkey.go | 57 +++++ internal/session/doc.go | 4 + internal/session/keysource.go | 60 +++++ internal/session/manager.go | 355 +++++++++++++++++++++++++++++ 6 files changed, 822 insertions(+) create mode 100644 internal/optional/optional.go create mode 100644 internal/packetmuxer/service.go create mode 100644 internal/session/datachannelkey.go create mode 100644 internal/session/doc.go create mode 100644 internal/session/keysource.go create mode 100644 internal/session/manager.go diff --git a/internal/optional/optional.go b/internal/optional/optional.go new file mode 100644 index 00000000..f330b812 --- /dev/null +++ b/internal/optional/optional.go @@ -0,0 +1,58 @@ +package optional + +import ( + "reflect" + + "github.com/ooni/minivpn/internal/runtimex" +) + +// Value is an optional value. The zero value of this structure +// is equivalent to the one you get when calling [None]. +type Value[T any] struct { + // indirect is the indirect pointer to the value. + indirect *T +} + +// None constructs an empty value. +func None[T any]() Value[T] { + return Value[T]{nil} +} + +// Some constructs a some value unless T is a pointer and points to +// nil, in which case [Some] is equivalent to [None]. +func Some[T any](value T) Value[T] { + v := Value[T]{} + maybeSetFromValue(&v, value) + return v +} + +// maybeSetFromValue sets the underlying value unless T is a pointer +// and points to nil in which case we set the Value to be empty. +func maybeSetFromValue[T any](v *Value[T], value T) { + rv := reflect.ValueOf(value) + if rv.Type().Kind() == reflect.Pointer && rv.IsNil() { + v.indirect = nil + return + } + v.indirect = &value +} + +// IsNone returns whether this [Value] is empty. 
+func (v Value[T]) IsNone() bool { + return v.indirect == nil +} + +// Unwrap returns the underlying value or panics. In case of +// panic, the value passed to panic is an error. +func (v Value[T]) Unwrap() T { + runtimex.Assert(!v.IsNone(), "is none") + return *v.indirect +} + +// UnwrapOr returns the fallback if the [Value] is empty. +func (v Value[T]) UnwrapOr(fallback T) T { + if v.IsNone() { + return fallback + } + return v.Unwrap() +} diff --git a/internal/packetmuxer/service.go b/internal/packetmuxer/service.go new file mode 100644 index 00000000..d64dffc4 --- /dev/null +++ b/internal/packetmuxer/service.go @@ -0,0 +1,288 @@ +// Package packetmuxer implements the packet-muxer workers. +package packetmuxer + +import ( + "github.com/ooni/minivpn/internal/model" + "github.com/ooni/minivpn/internal/session" + "github.com/ooni/minivpn/internal/workers" +) + +// Service is the packetmuxer service. Make sure you initialize +// the channels before invoking [Service.StartWorkers]. +type Service struct { + // HardReset receives requests to initiate a hard reset, that will start the openvpn handshake. + HardReset chan any + + // NotifyTLS sends reset notifications to tlsstate. + NotifyTLS *chan *model.Notification + + // MuxerToReliable moves packets up to reliabletransport. + MuxerToReliable *chan *model.Packet + + // MuxerToData moves packets up to the datachannel. + MuxerToData *chan *model.Packet + + // DataOrControlToMuxer moves packets down from the reliabletransport or datachannel. + DataOrControlToMuxer chan *model.Packet + + // MuxerToNetwork moves bytes down to the networkio layer below us. + MuxerToNetwork *chan []byte + + // NetworkToMuxer moves bytes up to us from the networkio layer below. + NetworkToMuxer chan []byte +} + +// StartWorkers starts the packet-muxer workers. See the [ARCHITECTURE] +// file for more information about the packet-muxer workers. 
+// +// [ARCHITECTURE]: https://github.com/ooni/minivpn/blob/main/ARCHITECTURE.md +func (s *Service) StartWorkers( + logger model.Logger, + workersManager *workers.Manager, + sessionManager *session.Manager, +) { + ws := &workersState{ + logger: logger, + hardReset: s.HardReset, + notifyTLS: *s.NotifyTLS, + muxerToReliable: *s.MuxerToReliable, + muxerToData: *s.MuxerToData, + dataOrControlToMuxer: s.DataOrControlToMuxer, + muxerToNetwork: *s.MuxerToNetwork, + networkToMuxer: s.NetworkToMuxer, + sessionManager: sessionManager, + workersManager: workersManager, + } + workersManager.StartWorker(ws.moveUpWorker) + workersManager.StartWorker(ws.moveDownWorker) +} + +// workersState contains the reliabletransport workers state. +type workersState struct { + // logger is the logger to use + logger model.Logger + + // hardReset is the channel posted to force a hard reset. + hardReset <-chan any + + // notifyTLS is used to send notifications to the TLS service. + notifyTLS chan<- *model.Notification + + // dataOrControlToMuxer is the channel for reading all the packets traveling down the stack. + dataOrControlToMuxer <-chan *model.Packet + + // muxerToReliable is the channel for writing control packets going up the stack. + muxerToReliable chan<- *model.Packet + + // muxerToData is the channel for writing data packets going up the stack. + muxerToData chan<- *model.Packet + + // muxerToNetwork is the channel for writing raw packets going down the stack. + muxerToNetwork chan<- []byte + + // networkToMuxer is the channel for reading raw packets going up the stack. + networkToMuxer <-chan []byte + + // sessionManager manages the OpenVPN session. + sessionManager *session.Manager + + // workersManager controls the workers lifecycle. 
+ workersManager *workers.Manager +} + +// moveUpWorker moves packets up the stack +func (ws *workersState) moveUpWorker() { + defer func() { + ws.workersManager.OnWorkerDone() + ws.workersManager.StartShutdown() + ws.logger.Debug("packetmuxer: moveUpWorker: done") + }() + + ws.logger.Debug("packetmuxer: moveUpWorker: started") + + for { + // POSSIBLY BLOCK awaiting for incoming raw packet + select { + case rawPacket := <-ws.networkToMuxer: + if err := ws.handleRawPacket(rawPacket); err != nil { + // error already printed + return + } + + case <-ws.hardReset: + if err := ws.startHardReset(); err != nil { + // error already logged + return + } + + case <-ws.workersManager.ShouldShutdown(): + return + } + } +} + +// moveDownWorker moves packets down the stack +func (ws *workersState) moveDownWorker() { + defer func() { + ws.workersManager.OnWorkerDone() + ws.workersManager.StartShutdown() + ws.logger.Debug("packetmuxer: moveDownWorker: done") + }() + + ws.logger.Debug("packetmuxer: moveDownWorker: started") + + for { + // POSSIBLY BLOCK on reading the packet moving down the stack + select { + case packet := <-ws.dataOrControlToMuxer: + // serialize the packet + rawPacket, err := packet.Bytes() + if err != nil { + ws.logger.Warnf("packetmuxer: cannot serialize packet: %s", err.Error()) + continue + } + + // While this channel send could possibly block, the [ARCHITECTURE] is + // such that (1) the channel is buffered and (2) the channel sender should + // avoid blocking when inserting data into the channel. + // + // [ARCHITECTURE]: https://github.com/ooni/minivpn/blob/main/ARCHITECTURE.md + select { + case ws.muxerToNetwork <- rawPacket: + default: + // drop the packet if the buffer is full as documented above + case <-ws.workersManager.ShouldShutdown(): + return + } + + case <-ws.workersManager.ShouldShutdown(): + return + } + } +} + +// startHardReset is invoked when we need to perform a HARD RESET. 
+func (ws *workersState) startHardReset() error { + // emit a CONTROL_HARD_RESET_CLIENT_V2 pkt + packet, err := ws.sessionManager.NewPacket(model.P_CONTROL_HARD_RESET_CLIENT_V2, nil) + if err != nil { + ws.logger.Warnf("packetmuxer: NewPacket: %s", err.Error()) + return err + } + if err := ws.serializeAndEmit(packet); err != nil { + return err + } + + // reset the state to become initial again + ws.sessionManager.SetNegotiationState(session.S_PRE_START) + + // TODO: any other change to apply in this case? + + return nil +} + +// handleRawPacket is the code invoked to handle a raw packet. +func (ws *workersState) handleRawPacket(rawPacket []byte) error { + // make sense of the packet + packet, err := model.ParsePacket(rawPacket) + if err != nil { + ws.logger.Warnf("packetmuxer: moveUpWorker: ParsePacket: %s", err.Error()) + return nil // keep running + } + + // handle the case where we're performing a HARD_RESET + if ws.sessionManager.NegotiationState() == session.S_PRE_START && + packet.Opcode == model.P_CONTROL_HARD_RESET_SERVER_V2 { + return ws.finishThreeWayHandshake(packet) + } + + // TODO: introduce other sanity checks here + + // multiplex the incoming packet POSSIBLY BLOCKING on delivering it + if packet.IsControl() || packet.Opcode == model.P_ACK_V1 { + select { + case ws.muxerToReliable <- packet: + case <-ws.workersManager.ShouldShutdown(): + return workers.ErrShutdown + } + } else { + select { + case ws.muxerToData <- packet: + case <-ws.workersManager.ShouldShutdown(): + return workers.ErrShutdown + } + } + + return nil +} + +// finishThreeWayHandshake responds to the HARD_RESET_SERVER and finishes the handshake.
+func (ws *workersState) finishThreeWayHandshake(packet *model.Packet) error { + // register the server's session (note: the PoV is the server's one) + ws.sessionManager.SetRemoteSessionID(packet.LocalSessionID) + + // we need to manually ACK because the reliable layer is above us + ws.logger.Debugf( + "< %s localID=%x remoteID=%x [%d bytes]", + packet.Opcode, + packet.LocalSessionID, + packet.RemoteSessionID, + len(packet.Payload), + ) + + // create the ACK packet + ACK, err := ws.sessionManager.NewACKForPacket(packet) + if err != nil { + return err + } + + // emit the packet + if err := ws.serializeAndEmit(ACK); err != nil { + return err + } + + // advance the state + ws.sessionManager.SetNegotiationState(session.S_START) + + // attempt to tell TLS we want to handshake + select { + case ws.notifyTLS <- &model.Notification{Flags: model.NotificationReset}: + // nothing + + default: + // the architecture says this notification should be nonblocking + + case <-ws.workersManager.ShouldShutdown(): + return workers.ErrShutdown + } + + return nil +} + +// serializeAndEmit was written because Ain Ghazal was very insistent about it. 
+func (ws *workersState) serializeAndEmit(packet *model.Packet) error { + // serialize it + rawPacket, err := packet.Bytes() + if err != nil { + return err + } + + // emit the packet + select { + case ws.muxerToNetwork <- rawPacket: + // nothing + + case <-ws.workersManager.ShouldShutdown(): + return workers.ErrShutdown + } + + ws.logger.Debugf( + "> %s localID=%x remoteID=%x [%d bytes]", + packet.Opcode, + packet.LocalSessionID, + packet.RemoteSessionID, + len(packet.Payload), + ) + + return nil +} diff --git a/internal/session/datachannelkey.go b/internal/session/datachannelkey.go new file mode 100644 index 00000000..99b08e57 --- /dev/null +++ b/internal/session/datachannelkey.go @@ -0,0 +1,57 @@ +package session + +import ( + "errors" + "fmt" + "sync" +) + +// DataChannelKey represents a pair of key sources that have been negotiated +// over the control channel, and from which we will derive local and remote +// keys for encryption and decryption over the data channel. The index refers to +// the short key_id that is passed in the lower 3 bits of a packet header. +// The setup of the keys for a given data channel (that is, for every key_id) +// is made by expanding the keysources using the prf function. +// +// Do note that we are not yet implementing key renegotiation - but the index +// is provided for convenience when/if we support that in the future. +type DataChannelKey struct { + index uint32 + ready bool + local *KeySource + remote *KeySource + mu sync.Mutex +} + +// errDataChannelKey is a [DataChannelKey] error. +var errDataChannelKey = errors.New("bad data-channel key") + +// Local returns the local [KeySource] +func (dck *DataChannelKey) Local() *KeySource { + return dck.local +} + +// Remote returns the remote [KeySource] +func (dck *DataChannelKey) Remote() *KeySource { + return dck.remote +} + +// AddRemoteKey adds the server keySource to our dataChannelKey. This makes the +// dataChannelKey ready to be used.
+func (dck *DataChannelKey) AddRemoteKey(k *KeySource) error { + dck.mu.Lock() + defer dck.mu.Unlock() + if dck.ready { + return fmt.Errorf("%w: %s", errDataChannelKey, "cannot overwrite remote key slot") + } + dck.remote = k + dck.ready = true + return nil +} + +// Ready returns whether the [DataChannelKey] is ready. +func (dck *DataChannelKey) Ready() bool { + dck.mu.Lock() + defer dck.mu.Unlock() + return dck.ready +} diff --git a/internal/session/doc.go b/internal/session/doc.go new file mode 100644 index 00000000..87077762 --- /dev/null +++ b/internal/session/doc.go @@ -0,0 +1,4 @@ +// Package session keeps state for the application, including internal state +// transitions for the OpenVPN protocol, data channel keys, and all the state +// pertaining to the different packet counters. +package session diff --git a/internal/session/keysource.go b/internal/session/keysource.go new file mode 100644 index 00000000..d40d7473 --- /dev/null +++ b/internal/session/keysource.go @@ -0,0 +1,60 @@ +package session + +import ( + "bytes" + "errors" + "fmt" + + "github.com/ooni/minivpn/internal/bytesx" +) + +// randomFn mocks the function to generate random bytes. +var randomFn = bytesx.GenRandomBytes + +// errRandomBytes is the error returned when we cannot generate random bytes. +var errRandomBytes = errors.New("error generating random bytes") + +// KeySource contains random data to generate keys. +type KeySource struct { + R1 [32]byte + R2 [32]byte + PreMaster [48]byte +} + +// Bytes returns the byte representation of a [KeySource]. +func (k *KeySource) Bytes() []byte { + buf := &bytes.Buffer{} + buf.Write(k.PreMaster[:]) + buf.Write(k.R1[:]) + buf.Write(k.R2[:]) + return buf.Bytes() +} + +// NewKeySource constructs a new [KeySource]. 
+func NewKeySource() (*KeySource, error) { + random1, err := randomFn(32) + if err != nil { + return nil, fmt.Errorf("%w: %s", errRandomBytes, err.Error()) + } + + var r1, r2 [32]byte + var preMaster [48]byte + copy(r1[:], random1) + + random2, err := randomFn(32) + if err != nil { + return nil, fmt.Errorf("%w: %s", errRandomBytes, err.Error()) + } + copy(r2[:], random2) + + random3, err := randomFn(48) + if err != nil { + return nil, fmt.Errorf("%w: %s", errRandomBytes, err.Error()) + } + copy(preMaster[:], random3) + return &KeySource{ + R1: r1, + R2: r2, + PreMaster: preMaster, + }, nil +} diff --git a/internal/session/manager.go b/internal/session/manager.go new file mode 100644 index 00000000..8ee469e6 --- /dev/null +++ b/internal/session/manager.go @@ -0,0 +1,355 @@ +package session + +import ( + "errors" + "fmt" + "math" + "strconv" + "strings" + "sync" + + "github.com/ooni/minivpn/internal/model" + "github.com/ooni/minivpn/internal/optional" + "github.com/ooni/minivpn/internal/runtimex" +) + +// SessionNegotiationState is the state of the session negotiation. +type SessionNegotiationState int + +const ( + // S_ERROR means there was some form of protocol error. + S_ERROR = SessionNegotiationState(iota) - 1 + + // S_UNDEF is the undefined state. + S_UNDEF + + // S_INITIAL means we're ready to begin the three-way handshake. + S_INITIAL + + // S_PRE_START means we're waiting for acknowledgment from the remote. + S_PRE_START + + // S_START means we've done the three-way handshake. + S_START + + // S_SENT_KEY means we have sent the local part of the key_source2 random material. + S_SENT_KEY + + // S_GOT_KEY means we have got the remote part of key_source2. + S_GOT_KEY + + // S_ACTIVE means the control channel was established. + S_ACTIVE + + // S_GENERATED_KEYS means the data channel keys have been generated. + S_GENERATED_KEYS +) + +// String maps a [SessionNegotiationState] to a string.
+func (sns SessionNegotiationState) String() string { + switch sns { + case S_UNDEF: + return "S_UNDEF" + case S_INITIAL: + return "S_INITIAL" + case S_PRE_START: + return "S_PRE_START" + case S_START: + return "S_START" + case S_SENT_KEY: + return "S_SENT_KEY" + case S_GOT_KEY: + return "S_GOT_KEY" + case S_ACTIVE: + return "S_ACTIVE" + case S_GENERATED_KEYS: + return "S_GENERATED_KEYS" + case S_ERROR: + return "S_ERROR" + default: + return "S_INVALID" + } +} + +// Manager manages the session. The zero value is invalid. Please, construct +// using [NewManager]. This struct is concurrency safe. +type Manager struct { + keyID uint8 + keys []*DataChannelKey + localControlPacketID model.PacketID + localDataPacketID model.PacketID + localSessionID model.SessionID + logger model.Logger + mu sync.Mutex + negState SessionNegotiationState + remoteSessionID optional.Value[model.SessionID] + tunnelInfo model.TunnelInfo + + // Ready is a channel where we signal that we can start accepting data, because we've + // successfully generated key material for the data channel. + // TODO(ainghazal): find a better way? + Ready chan any +} + +// NewManager returns a [Manager] ready to be used. +func NewManager(logger model.Logger) (*Manager, error) { + key0 := &DataChannelKey{} + sessionManager := &Manager{ + keyID: 0, + keys: []*DataChannelKey{key0}, + localSessionID: [8]byte{}, + logger: logger, + mu: sync.Mutex{}, + negState: 0, + remoteSessionID: optional.None[model.SessionID](), + tunnelInfo: model.TunnelInfo{}, + + // empirically, it seems that the reference OpenVPN server misbehaves if we initialize + // the data packet ID counter to zero. 
+ localDataPacketID: 1, + + Ready: make(chan any), + } + + randomBytes, err := randomFn(8) + if err != nil { + return sessionManager, err + } + + sessionManager.localSessionID = (model.SessionID)(randomBytes[:8]) + + localKey, err := NewKeySource() + if err != nil { + return sessionManager, err + } + + k, err := sessionManager.ActiveKey() + if err != nil { + return sessionManager, err + } + k.local = localKey + return sessionManager, nil +} + +// LocalSessionID gets the local session ID as bytes. +func (m *Manager) LocalSessionID() []byte { + defer m.mu.Unlock() + m.mu.Lock() + return m.localSessionID[:] +} + +// RemoteSessionID gets the remote session ID as bytes. +func (m *Manager) RemoteSessionID() []byte { + defer m.mu.Unlock() + m.mu.Lock() + rs := m.remoteSessionID + if !rs.IsNone() { + val := rs.Unwrap() + return val[:] + } + return nil +} + +// IsRemoteSessionIDSet returns whether we've set the remote session ID. +func (m *Manager) IsRemoteSessionIDSet() bool { + defer m.mu.Unlock() + m.mu.Lock() + return !m.remoteSessionID.IsNone() +} + +// ErrNoRemoteSessionID indicates we are missing the remote session ID. +var ErrNoRemoteSessionID = errors.New("missing remote session ID") + +// NewACKForPacket creates a new ACK for the given packet. +func (m *Manager) NewACKForPacket(packet *model.Packet) (*model.Packet, error) { + defer m.mu.Unlock() + m.mu.Lock() + if m.remoteSessionID.IsNone() { + return nil, ErrNoRemoteSessionID + } + p := &model.Packet{ + Opcode: model.P_ACK_V1, + KeyID: m.keyID, + PeerID: [3]byte{}, + LocalSessionID: m.localSessionID, + ACKs: []model.PacketID{packet.ID}, + RemoteSessionID: m.remoteSessionID.Unwrap(), + ID: 0, + Payload: []byte{}, + } + return p, nil +} + +// NewPacket creates a new packet for this session. 
+func (m *Manager) NewPacket(opcode model.Opcode, payload []byte) (*model.Packet, error) { + defer m.mu.Unlock() + m.mu.Lock() + // TODO: consider unifying with ACKing code + packet := model.NewPacket( + opcode, + m.keyID, + payload, + ) + copy(packet.LocalSessionID[:], m.localSessionID[:]) + pid, err := func() (model.PacketID, error) { + if opcode.IsControl() { + return m.localControlPacketIDLocked() + } else { + return m.localDataPacketIDLocked() + } + }() + if err != nil { + return nil, err + } + packet.ID = pid + if !m.remoteSessionID.IsNone() { + packet.RemoteSessionID = m.remoteSessionID.Unwrap() + } + return packet, nil +} + +var ErrExpiredKey = errors.New("expired key") + +// LocalDataPacketID returns an unique Packet ID for the Data Channel. It +// increments the counter for the local data packet ID. +func (m *Manager) LocalDataPacketID() (model.PacketID, error) { + defer m.mu.Unlock() + m.mu.Lock() + return m.localDataPacketIDLocked() +} + +// localDataPacketIDLocked returns an unique Packet ID for the Data Channel. It +// increments the counter for the local data packet ID. +func (m *Manager) localDataPacketIDLocked() (model.PacketID, error) { + pid := m.localDataPacketID + if pid == math.MaxUint32 { + // we reached the max packetID, increment will overflow + return 0, ErrExpiredKey + } + m.localDataPacketID++ + return pid, nil +} + +// localControlPacketIDLocked returns an unique Packet ID for the Control Channel. It +// increments the counter for the local control packet ID. +func (m *Manager) localControlPacketIDLocked() (model.PacketID, error) { + pid := m.localControlPacketID + if pid == math.MaxUint32 { + // we reached the max packetID, increment will overflow + return 0, ErrExpiredKey + } + m.localControlPacketID++ + return pid, nil +} + +// NegotiationState returns the state of the negotiation. 
+func (m *Manager) NegotiationState() SessionNegotiationState { + defer m.mu.Unlock() + m.mu.Lock() + return m.negState +} + +// SetNegotiationState sets the state of the negotiation. +func (m *Manager) SetNegotiationState(sns SessionNegotiationState) { + defer m.mu.Unlock() + m.mu.Lock() + m.logger.Infof("[@] %s -> %s", m.negState, sns) + m.negState = sns + if sns == S_GENERATED_KEYS { + m.Ready <- true + } +} + +// ActiveKey returns the dataChannelKey that is actively being used. +func (m *Manager) ActiveKey() (*DataChannelKey, error) { + defer m.mu.Unlock() + m.mu.Lock() + if len(m.keys) > math.MaxUint8 || m.keyID >= uint8(len(m.keys)) { + return nil, fmt.Errorf("%w: %s", errDataChannelKey, "no such key id") + } + dck := m.keys[m.keyID] + // TODO(bassosimone): the following code would prevent us from + // creating a new session at the beginning--refactor? + /* + if !dck.Ready() { + return nil, fmt.Errorf("%w: %s", errDataChannelKey, "not ready") + } + */ + return dck, nil +} + +// SetRemoteSessionID sets the remote session ID. +func (m *Manager) SetRemoteSessionID(remoteSessionID model.SessionID) { + defer m.mu.Unlock() + m.mu.Lock() + runtimex.Assert(m.remoteSessionID.IsNone(), "SetRemoteSessionID called more than once") + m.remoteSessionID = optional.Some(remoteSessionID) +} + +func (m *Manager) CurrentKeyID() uint8 { + defer m.mu.Unlock() + m.mu.Lock() + return m.keyID +} + +// InitTunnelInfo initializes TunnelInfo from data obtained from the auth response. +func (m *Manager) InitTunnelInfo(remoteOption string) error { + defer m.mu.Unlock() + m.mu.Lock() + ti, err := newTunnelInfoFromRemoteOptionsString(remoteOption) + if err != nil { + return err + } + m.tunnelInfo = *ti + m.logger.Infof("Tunnel MTU: %v", m.tunnelInfo.MTU) + return nil +} + +// newTunnelInfoFromRemoteOptionsString parses the options string returned by +// server. It returns a new tunnelInfo object where the needed fields have been +// updated. 
At the moment, we only parse the tun-mtu parameter. +func newTunnelInfoFromRemoteOptionsString(remoteOpts string) (*model.TunnelInfo, error) { + t := &model.TunnelInfo{} + opts := strings.Split(remoteOpts, ",") + for _, opt := range opts { + vals := strings.Split(opt, " ") + if len(vals) < 2 { + continue + } + k, v := vals[0], vals[1:] + if k == "tun-mtu" { + mtu, err := strconv.Atoi(v[0]) + if err != nil { + return nil, err + } + t.MTU = mtu + } + } + return t, nil +} + +// UpdateTunnelInfo updates the internal tunnel info from the push response message +func (m *Manager) UpdateTunnelInfo(ti *model.TunnelInfo) { + defer m.mu.Unlock() + m.mu.Lock() + + m.tunnelInfo.IP = ti.IP + m.tunnelInfo.GW = ti.GW + m.tunnelInfo.PeerID = ti.PeerID + + m.logger.Infof("Tunnel IP: %s", ti.IP) + m.logger.Infof("Gateway IP: %s", ti.GW) + m.logger.Infof("Peer ID: %d", ti.PeerID) +} + +// TunnelInfo returns a copy of the current TunnelInfo +func (m *Manager) TunnelInfo() model.TunnelInfo { + defer m.mu.Unlock() + m.mu.Lock() + return model.TunnelInfo{ + MTU: m.tunnelInfo.MTU, + IP: m.tunnelInfo.IP, + GW: m.tunnelInfo.GW, + PeerID: m.tunnelInfo.PeerID, + } +}