From 28585f492c9bf64d573f4ebaa7e0bc0a6209ba4d Mon Sep 17 00:00:00 2001 From: Tiago Silva Date: Fri, 30 Aug 2024 18:57:20 +0100 Subject: [PATCH 1/3] [kube] avoid terminal resize loop on macOS Terminal app (#46068) When performing a quick resize, the macOS Terminal generates several intermediate window sizes between the original and final terminal sizes. During this resizing process, Teleport receives all the intermediate sizes and broadcasts them to all participants in the session. If the resizing information is sent back to the party that initiated the resize, it can create a feedback loop where the terminal and Teleport repeatedly attempt to enforce different window sizes. This PR prevents Teleport from resending the resize event to the party that triggered the initial resize. Fixes #30714 Signed-off-by: Tiago Silva --- lib/kube/proxy/forwarder.go | 2 +- lib/kube/proxy/sess.go | 95 ++++++++++++++++++++++++++++++------- 2 files changed, 79 insertions(+), 18 deletions(-) diff --git a/lib/kube/proxy/forwarder.go b/lib/kube/proxy/forwarder.go index 6d0f3ed5a277..98160c470d98 100644 --- a/lib/kube/proxy/forwarder.go +++ b/lib/kube/proxy/forwarder.go @@ -1194,7 +1194,7 @@ func (f *Forwarder) join(ctx *authContext, w http.ResponseWriter, req *http.Requ return trace.Wrap(err) } - client := &websocketClientStreams{stream} + client := &websocketClientStreams{uuid.New(), stream} party := newParty(*ctx, stream.Mode, client) err = session.join(party, true /* emitSessionJoinEvent */) diff --git a/lib/kube/proxy/sess.go b/lib/kube/proxy/sess.go index cc1f9a361e46..0a98df0d07cc 100644 --- a/lib/kube/proxy/sess.go +++ b/lib/kube/proxy/sess.go @@ -66,10 +66,11 @@ const ( // remoteClient is either a kubectl or websocket client. type remoteClient interface { + queueID() uuid.UUID stdinStream() io.Reader stdoutStream() io.Writer stderrStream() io.Writer - resizeQueue() <-chan *remotecommand.TerminalSize + resizeQueue() <-chan terminalResizeMessage resize(size *remotecommand.TerminalSize) error forceTerminate() <-chan struct{} sendStatus(error) error @@ -77,9 +78,14 @@ type remoteClient interface { } type websocketClientStreams struct { + id uuid.UUID stream *streamproto.SessionStream } +func (p *websocketClientStreams) queueID() uuid.UUID { + return p.id +} + func (p *websocketClientStreams) stdinStream() io.Reader { return p.stream } @@ -92,8 +98,26 @@ func (p *websocketClientStreams) stderrStream() io.Writer { return p.stream } -func (p *websocketClientStreams) resizeQueue() <-chan *remotecommand.TerminalSize { - return p.stream.ResizeQueue() +func (p *websocketClientStreams) resizeQueue() <-chan terminalResizeMessage { + ch := make(chan terminalResizeMessage) + go func() { + defer close(ch) + for { + select { + case <-p.stream.Done(): + return + case size := <-p.stream.ResizeQueue(): + if size == nil { + return + } + ch <- terminalResizeMessage{ + size: size, + source: p.id, + } + } + } + }() + return ch } func (p *websocketClientStreams) resize(size *remotecommand.TerminalSize) error { @@ -113,6 +137,7 @@ func (p *websocketClientStreams) Close() error { } type kubeProxyClientStreams struct { + id uuid.UUID proxy *remoteCommandProxy sizeQueue *termQueue stdin io.Reader @@ -126,6 +151,7 @@ func newKubeProxyClientStreams(proxy *remoteCommandProxy) *kubeProxyClientStream options := proxy.options() return &kubeProxyClientStreams{ + id: uuid.New(), proxy: proxy, stdin: options.Stdin, stdout: options.Stdout, @@ -135,6 +161,10 @@ func newKubeProxyClientStreams(proxy *remoteCommandProxy) *kubeProxyClientStream } } 
+func (p *kubeProxyClientStreams) queueID() uuid.UUID { + return p.id +} + func (p *kubeProxyClientStreams) stdinStream() io.Reader { return p.stdin } @@ -147,8 +177,8 @@ func (p *kubeProxyClientStreams) stderrStream() io.Writer { return p.stderr } -func (p *kubeProxyClientStreams) resizeQueue() <-chan *remotecommand.TerminalSize { - ch := make(chan *remotecommand.TerminalSize) +func (p *kubeProxyClientStreams) resizeQueue() <-chan terminalResizeMessage { + ch := make(chan terminalResizeMessage) p.wg.Add(1) go func() { defer p.wg.Done() @@ -157,8 +187,9 @@ func (p *kubeProxyClientStreams) resizeQueue() <-chan *remotecommand.TerminalSiz if size == nil { return } + select { - case ch <- size: + case ch <- terminalResizeMessage{size, p.id}: // Check if the sizeQueue was already terminated. case <-p.sizeQueue.done.Done(): return @@ -191,21 +222,28 @@ func (p *kubeProxyClientStreams) Close() error { return trace.Wrap(p.proxy.Close()) } +// terminalResizeMessage is a message that contains the terminal size and the source of the resize event. +type terminalResizeMessage struct { + size *remotecommand.TerminalSize + source uuid.UUID +} + // multiResizeQueue is a merged queue of multiple terminal size queues. type multiResizeQueue struct { - queues map[string]<-chan *remotecommand.TerminalSize + queues map[string]<-chan terminalResizeMessage cases []reflect.SelectCase - callback func(*remotecommand.TerminalSize) + callback func(terminalResizeMessage) mutex sync.Mutex parentCtx context.Context reloadCtx context.Context reloadCancel context.CancelFunc + lastSize *remotecommand.TerminalSize } func newMultiResizeQueue(parentCtx context.Context) *multiResizeQueue { ctx, cancel := context.WithCancel(parentCtx) return &multiResizeQueue{ - queues: make(map[string]<-chan *remotecommand.TerminalSize), + queues: make(map[string]<-chan terminalResizeMessage), parentCtx: parentCtx, reloadCtx: ctx, reloadCancel: cancel, @@ -232,11 +270,17 @@ func (r *multiResizeQueue) rebuild() { } } +func (r *multiResizeQueue) getLastSize() *remotecommand.TerminalSize { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.lastSize +} + func (r *multiResizeQueue) close() { r.reloadCancel() } -func (r *multiResizeQueue) add(id string, queue <-chan *remotecommand.TerminalSize) { +func (r *multiResizeQueue) add(id string, queue <-chan terminalResizeMessage) { r.mutex.Lock() defer r.mutex.Unlock() r.queues[id] = queue @@ -268,9 +312,12 @@ loop: } } - size := value.Interface().(*remotecommand.TerminalSize) + size := value.Interface().(terminalResizeMessage) r.callback(size) - return size + r.mutex.Lock() + r.lastSize = size.size + r.mutex.Unlock() + return size.size } } @@ -684,20 +731,24 @@ func (s *session) lockedSetupLaunch(request *remoteCommandRequest, eventPodMeta sessionStart := s.forwarder.cfg.Clock.Now().UTC() if !s.sess.noAuditEvents { - s.terminalSizeQueue.callback = func(resize *remotecommand.TerminalSize) { + s.terminalSizeQueue.callback = func(termSize terminalResizeMessage) { s.mu.Lock() defer s.mu.Unlock() for id, p := range s.parties { - err := p.Client.resize(resize) + // Skip the party that sent the resize event to avoid a resize loop. 
+ if p.Client.queueID() == termSize.source { + continue + } + err := p.Client.resize(termSize.size) if err != nil { s.log.WithError(err).Errorf("Failed to resize client: %v", id.String()) } } params := tsession.TerminalParams{ - W: int(resize.Width), - H: int(resize.Height), + W: int(termSize.size.Width), + H: int(termSize.size.Height), } resizeEvent, err := s.recorder.PrepareSessionEvent(&apievents.Resize{ @@ -728,7 +779,7 @@ func (s *session) lockedSetupLaunch(request *remoteCommandRequest, eventPodMeta } } } else { - s.terminalSizeQueue.callback = func(resize *remotecommand.TerminalSize) {} + s.terminalSizeQueue.callback = func(resize terminalResizeMessage) {} } // If we get here, it means we are going to have a session.end event. @@ -940,6 +991,16 @@ func (s *session) join(p *party, emitJoinEvent bool) error { s.partiesHistorical[p.ID] = p s.terminalSizeQueue.add(stringID, p.Client.resizeQueue()) + // If the session is already running, we need to resize the new party's terminal + // to match the last terminal size. + // This is done to ensure that the new party's terminal is the same size as the + // other parties' terminals and no discrepancies are present. + if lastQueueSize := s.terminalSizeQueue.getLastSize(); lastQueueSize != nil { + if err := p.Client.resize(lastQueueSize); err != nil { + s.log.WithError(err).Errorf("Failed to resize client: %v", stringID) + } + } + if p.Mode == types.SessionPeerMode { s.io.AddReader(stringID, p.Client.stdinStream()) } From 0983ea291199c5e10a0d67f70444b0e4fafb5352 Mon Sep 17 00:00:00 2001 From: Erik Tate Date: Fri, 30 Aug 2024 15:57:07 -0400 Subject: [PATCH 2/3] adds host_sudoers example to the role spec reference (#46044) --- docs/pages/includes/role-spec.mdx | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/pages/includes/role-spec.mdx b/docs/pages/includes/role-spec.mdx index 8968e8e96c0b..398d5851e3ab 100644 --- a/docs/pages/includes/role-spec.mdx +++ b/docs/pages/includes/role-spec.mdx @@ -146,6 +146,15 @@ spec: # is not 'off'. host_groups: [ubuntu, nginx, other] + # List of entries to include in a temporary sudoers file created in + # `/etc/sudoers.d`. The records are removed on session close. + host_sudoers: [ + # This line will allow the login user to run `systemctl restart nginx.service` + # as root without requiring a password. The sudoers entry will be prefixed + # with the logged in username. + "ALL = (root) NOPASSWD: /usr/bin/systemctl restart nginx.service" + ] + # kubernetes_groups specifies Kubernetes groups a user with this role will assume. # You can refer to a SAML/OIDC trait via the 'external' property bag. 
# This allows you to specify Kubernetes group membership in an identity manager: From e8fff248171eb7e41999b21e9dbc8d3b19bcd7f9 Mon Sep 17 00:00:00 2001 From: Forrest <30576607+fspmarshall@users.noreply.github.com> Date: Fri, 30 Aug 2024 14:04:17 -0700 Subject: [PATCH 3/3] fix session corruption (#46033) --- .../s3sessions/s3handler_thirdparty_test.go | 3 + lib/events/sessionlog.go | 7 +- lib/events/stream.go | 51 +++++++-- lib/events/stream_test.go | 21 ++++ lib/events/test/streamsuite.go | 105 +++++++++++++++++- lib/events/testdata/corrupted-session | Bin 0 -> 2097571 bytes 6 files changed, 174 insertions(+), 13 deletions(-) create mode 100644 lib/events/testdata/corrupted-session diff --git a/lib/events/s3sessions/s3handler_thirdparty_test.go b/lib/events/s3sessions/s3handler_thirdparty_test.go index d4aba60f1019..1eede3cb4910 100644 --- a/lib/events/s3sessions/s3handler_thirdparty_test.go +++ b/lib/events/s3sessions/s3handler_thirdparty_test.go @@ -61,6 +61,9 @@ func TestThirdpartyStreams(t *testing.T) { t.Run("StreamManyParts", func(t *testing.T) { test.Stream(t, handler) }) + t.Run("StreamWithPadding", func(t *testing.T) { + test.StreamWithPadding(t, handler) + }) t.Run("UploadDownload", func(t *testing.T) { test.UploadDownload(t, handler) }) diff --git a/lib/events/sessionlog.go b/lib/events/sessionlog.go index a9a0f15b8b4b..f239a1a110e4 100644 --- a/lib/events/sessionlog.go +++ b/lib/events/sessionlog.go @@ -106,7 +106,7 @@ func newGzipWriter(writer io.WriteCloser) *gzipWriter { // gzipReader wraps file, on close close both gzip writer and file type gzipReader struct { io.ReadCloser - inner io.Closer + inner io.ReadCloser } // Close closes file and gzip writer @@ -128,6 +128,11 @@ func newGzipReader(reader io.ReadCloser) (*gzipReader, error) { if err != nil { return nil, trace.Wrap(err) } + // older bugged versions of teleport would sometimes incorrectly inject padding bytes into + // the gzip section of the archive. this causes gzip readers with multistream enabled (the + // default behavior) to fail. we disable multistream here in order to ensure that the gzip + // reader halts when it reaches the end of the current (only) valid gzip entry. + gzReader.Multistream(false) return &gzipReader{ ReadCloser: gzReader, inner: reader, diff --git a/lib/events/stream.go b/lib/events/stream.go index df5d3ccc5ab0..f92bb01996db 100644 --- a/lib/events/stream.go +++ b/lib/events/stream.go @@ -91,6 +91,10 @@ type ProtoStreamerConfig struct { MinUploadBytes int64 // ConcurrentUploads sets concurrent uploads per stream ConcurrentUploads int + // ForceFlush is used in tests to force a flush of an in-progress slice. Note that + // sending on this channel just forces a single flush for whichever upload happens + // to receive the signal first, so this may not be suitable for concurrent tests. + ForceFlush chan struct{} } // CheckAndSetDefaults checks and sets streamer defaults @@ -139,6 +143,7 @@ func (s *ProtoStreamer) CreateAuditStreamForUpload(ctx context.Context, sid sess Uploader: s.cfg.Uploader, MinUploadBytes: s.cfg.MinUploadBytes, ConcurrentUploads: s.cfg.ConcurrentUploads, + ForceFlush: s.cfg.ForceFlush, }) } @@ -189,6 +194,10 @@ type ProtoStreamConfig struct { // after which streamer flushes the data to the uploader // to avoid data loss InactivityFlushPeriod time.Duration + // ForceFlush is used in tests to force a flush of an in-progress slice. 
Note that + // sending on this channel just forces a single flush for whichever upload happens + // to receive the signal first, so this may not be suitable for concurrent tests. + ForceFlush chan struct{} // Clock is used to override time in tests Clock clockwork.Clock // ConcurrentUploads sets concurrent uploads per stream @@ -546,6 +555,12 @@ func (w *sliceWriter) receiveAndUpload() error { delete(w.activeUploads, part.Number) w.updateCompletedParts(*part, upload.lastEventIndex) + case <-w.proto.cfg.ForceFlush: + if w.current != nil { + if err := w.startUploadCurrentSlice(); err != nil { + return trace.Wrap(err) + } + } case <-flushCh: now := clock.Now().UTC() inactivityPeriod := now.Sub(lastEvent) @@ -735,14 +750,18 @@ func (w *sliceWriter) startUpload(partNumber int64, slice *slice) (*activeUpload }) var retry retryutils.Retry + + // create reader once before the retry loop. in the event of an error, the reader must + // be reset via Seek rather than recreated. + reader, err := slice.reader() + if err != nil { + activeUpload.setError(err) + return + } + for i := 0; i < defaults.MaxIterationLimit; i++ { log := log.WithField("attempt", i) - reader, err := slice.reader() - if err != nil { - activeUpload.setError(err) - return - } part, err := w.proto.cfg.Uploader.UploadPart(w.proto.cancelCtx, w.proto.cfg.Upload, partNumber, reader) if err == nil { activeUpload.setPart(*part) @@ -772,10 +791,13 @@ func (w *sliceWriter) startUpload(partNumber int64, slice *slice) (*activeUpload } } retry.Inc() + + // reset reader to the beginning of the slice so it can be re-read if _, err := reader.Seek(0, 0); err != nil { activeUpload.setError(err) return } + select { case <-retry.After(): log.WithError(err).Debugf("Back off period for retry has passed. Retrying") @@ -833,8 +855,9 @@ type slice struct { lastEventIndex int64 } -// reader returns a reader for the bytes written, -// no writes should be done after this method is called +// reader returns a reader for the bytes written, no writes should be done after this +// method is called and this method should be called at most once per slice, otherwise +// the resulting recording will be corrupted. func (s *slice) reader() (io.ReadSeeker, error) { if err := s.writer.Close(); err != nil { return nil, trace.Wrap(err) @@ -1071,6 +1094,20 @@ func (r *ProtoReader) Read(ctx context.Context) (apievents.AuditEvent, error) { if err != io.EOF { return nil, r.setError(trace.ConvertSystemError(err)) } + + // due to a bug in older versions of teleport it was possible that padding + // bytes would end up inside of the gzip section of the archive. we should + // skip any dangling data in the gzip secion. 
+ n, err := io.CopyBuffer(io.Discard, r.gzipReader.inner, r.messageBytes[:]) + if err != nil { + return nil, r.setError(trace.ConvertSystemError(err)) + } + + if n != 0 { + // log the number of bytes that were skipped + log.WithField("length", n).Debug("skipped dangling data in session recording section") + } + // reached the end of the current part, but not necessarily // the end of the stream if err := r.gzipReader.Close(); err != nil { diff --git a/lib/events/stream_test.go b/lib/events/stream_test.go index e420098f8eff..94f8a1c445aa 100644 --- a/lib/events/stream_test.go +++ b/lib/events/stream_test.go @@ -17,6 +17,7 @@ package events_test import ( "context" "errors" + "os" "strings" "testing" "time" @@ -198,6 +199,26 @@ func TestProtoStreamLargeEvent(t *testing.T) { require.NoError(t, stream.Complete(ctx)) } +// TestReadCorruptedRecording tests that the streamer can successfully decode the kind of corrupted +// recordings that some older bugged versions of teleport might end up producing when under heavy load/throttling. +func TestReadCorruptedRecording(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + f, err := os.Open("testdata/corrupted-session") + require.NoError(t, err) + defer f.Close() + + reader := events.NewProtoReader(f) + defer reader.Close() + + events, err := reader.ReadAll(ctx) + require.NoError(t, err) + + // verify that the expected number of events are extracted + require.Len(t, events, 12) +} + func makeQueryEvent(id string, query string) *apievents.DatabaseSessionQuery { return &apievents.DatabaseSessionQuery{ Metadata: apievents.Metadata{ diff --git a/lib/events/test/streamsuite.go b/lib/events/test/streamsuite.go index 7b9e8b241204..fca49071373d 100644 --- a/lib/events/test/streamsuite.go +++ b/lib/events/test/streamsuite.go @@ -16,10 +16,14 @@ package test import ( "context" + "fmt" + "io" "os" + "sync" "testing" "time" + "github.com/gravitational/trace" "github.com/stretchr/testify/require" "github.com/gravitational/teleport/lib/events" @@ -27,6 +31,42 @@ import ( "github.com/gravitational/teleport/lib/session" ) +type flakyHandler struct { + events.MultipartHandler + mu sync.Mutex + shouldFlake bool + flakedParts map[int64]bool +} + +func newFlakyHandler(handler events.MultipartHandler) *flakyHandler { + return &flakyHandler{ + MultipartHandler: handler, + flakedParts: make(map[int64]bool), + } +} + +func (f *flakyHandler) UploadPart(ctx context.Context, upload events.StreamUpload, partNumber int64, partBody io.ReadSeeker) (*events.StreamPart, error) { + var shouldFlake bool + f.mu.Lock() + if f.shouldFlake && !f.flakedParts[partNumber] { + shouldFlake = true + f.flakedParts[partNumber] = true + } + f.mu.Unlock() + + if shouldFlake { + return nil, trace.Errorf("flakeity flake flake") + } + + return f.MultipartHandler.UploadPart(ctx, upload, partNumber, partBody) +} + +func (f *flakyHandler) setFlakeUpload(flake bool) { + f.mu.Lock() + defer f.mu.Unlock() + f.shouldFlake = flake +} + // StreamParams configures parameters of a stream test suite type StreamParams struct { // PrintEvents is amount of print events to generate @@ -35,19 +75,34 @@ type StreamParams struct { ConcurrentUploads int // MinUploadBytes is minimum required upload bytes MinUploadBytes int64 + // Flaky is a flag that indicates that the handler should be flaky + Flaky bool + // ForceFlush is a flag that indicates that the handler should be forced to flush + // partially filled slices during event input. 
+ ForceFlush bool } // StreamSinglePart tests stream upload and subsequent download and reads the results func StreamSinglePart(t *testing.T, handler events.MultipartHandler) { - StreamWithParameters(t, handler, StreamParams{ + StreamWithPermutedParameters(t, handler, StreamParams{ PrintEvents: 1024, MinUploadBytes: 1024 * 1024, }) } +// StreamWithPadding tests stream upload in a case where significant padding must be added. Note that +// in practice padding is only necessarily added in the 'ForceFlush' permutation as single-slice uploads +// do not require padding. +func StreamWithPadding(t *testing.T, handler events.MultipartHandler) { + StreamWithPermutedParameters(t, handler, StreamParams{ + PrintEvents: 10, + MinUploadBytes: 1024 * 1024, + }) +} + // Stream tests stream upload and subsequent download and reads the results func Stream(t *testing.T, handler events.MultipartHandler) { - StreamWithParameters(t, handler, StreamParams{ + StreamWithPermutedParameters(t, handler, StreamParams{ PrintEvents: 1024, MinUploadBytes: 1024, ConcurrentUploads: 2, @@ -56,7 +111,7 @@ func Stream(t *testing.T, handler events.MultipartHandler) { // StreamManyParts tests stream upload and subsequent download and reads the results func StreamManyParts(t *testing.T, handler events.MultipartHandler) { - StreamWithParameters(t, handler, StreamParams{ + StreamWithPermutedParameters(t, handler, StreamParams{ PrintEvents: 8192, MinUploadBytes: 1024, ConcurrentUploads: 64, @@ -73,6 +128,27 @@ func StreamResumeManyParts(t *testing.T, handler events.MultipartHandler) { }) } +// StreamWithPermutedParameters tests stream upload and subsequent download and reads the results, repeating +// the process with various permutations of flake and flush parameters in order to better cover padding and +// retry logic, which are easy to accidentally fail to cover. +func StreamWithPermutedParameters(t *testing.T, handler events.MultipartHandler, params StreamParams) { + cases := []struct{ Flaky, ForceFlush bool }{ + {Flaky: false, ForceFlush: false}, + {Flaky: true, ForceFlush: false}, + {Flaky: false, ForceFlush: true}, + {Flaky: true, ForceFlush: true}, + } + + for _, cc := range cases { + t.Run(fmt.Sprintf("Flaky=%v,ForceFlush=%v", cc.Flaky, cc.ForceFlush), func(t *testing.T) { + pc := params + pc.Flaky = cc.Flaky + pc.ForceFlush = cc.ForceFlush + StreamWithParameters(t, handler, pc) + }) + } +} + // StreamWithParameters tests stream upload and subsequent download and reads the results func StreamWithParameters(t *testing.T, handler events.MultipartHandler, params StreamParams) { ctx := context.TODO() @@ -80,10 +156,15 @@ func StreamWithParameters(t *testing.T, handler events.MultipartHandler, params inEvents := eventstest.GenerateTestSession(eventstest.SessionParams{PrintEvents: params.PrintEvents}) sid := session.ID(inEvents[0].(events.SessionMetadataGetter).GetSessionID()) + forceFlush := make(chan struct{}) + + wrappedHandler := newFlakyHandler(handler) + streamer, err := events.NewProtoStreamer(events.ProtoStreamerConfig{ - Uploader: handler, + Uploader: wrappedHandler, MinUploadBytes: params.MinUploadBytes, ConcurrentUploads: params.ConcurrentUploads, + ForceFlush: forceFlush, }) require.Nil(t, err) @@ -97,7 +178,21 @@ func StreamWithParameters(t *testing.T, handler events.MultipartHandler, params t.Fatalf("Timed out waiting for status update.") } - for _, event := range inEvents { + // if enabled, flake causes the first upload attempt for each multipart upload part + // to fail. 
necessary in order to cover upload retry logic, which has historically been + // a source of bugs. + wrappedHandler.setFlakeUpload(params.Flaky) + + timeout := time.After(time.Minute) + + for i, event := range inEvents { + if params.ForceFlush && i%(len(inEvents)/3) == 0 { + select { + case forceFlush <- struct{}{}: + case <-timeout: + t.Fatalf("Timed out waiting for force flush.") + } + } err := stream.RecordEvent(ctx, eventstest.PrepareEvent(event)) require.Nil(t, err) } diff --git a/lib/events/testdata/corrupted-session b/lib/events/testdata/corrupted-session new file mode 100644 index 0000000000000000000000000000000000000000..da33fc8a28de2f731b169f549d656c6df8a475bb GIT binary patch literal 2097571 zcmeIv`F9Ui769-sV~I7Qwh^aBhNv!<*xF->wN5ONioKXBW$cWd6cdz+Wx`k@*6LBz zQffL?MWaZSHbyKnVoA#k!Ld)|81)?|jz3}Mr_cN0-TTgY=iPTdHxS5Q1@iaM;I(JR zK%i<;NPb!LKp{HW21n$H<>M*^3pAjo&&fZk!`-mEKa|T9Ek9;HMZbX+mCsOBj zTztDx+K|YBnTOl;+_ZmI=w~%MzgRtVSC1WQa(CB^4(YY_+kFczE%{_v&mB!N+uev+ zu;f6wUZvt%v@B8M*PVu@4G9~ZIB#QiOrOSEems3R=e5D}_m!x4>-L4*>`C{hP1}8R zY(n>x>!lL9J)HT$rbZ91d>pkjSh-I4TVujLdFAEz-pc&=orc@G6ljpqt#5kVrs^@Z zr}fMW7U_PyW#@q@+h!f_F{EYrj2fMKCdZ}3#8z9@tH{!yTOE3mv#wicd|tsO6E{q* z6tXA$^v2Xvr&9Xmd|IT@_%9Cy8xAda@7|FIX$va=;(9%GZW85jxN)xM~%Z* zw|83A|K_7weZEZ3?|$v?E)CDPH>O1I)C#*B71+Of#q2We+b_(x@WRg}2b}Hv)4^u9 zz9^cW8hrA21gLx-5y^;HDz|W^E5+Fc;009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs 
z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZ zfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&U zAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C7 z2oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N z0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+ z009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBly zK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF z5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk 
z1PBlyK!5-N0t5&UAV7cs0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7cs z0RjXF5FkK+009C72oNAZfB*pk1PBlyK!5-N0t5&UAV7csf&Ym>f&5oPgV&zFUNtEs zKP?m-emuJP!Mf$r@3vc6vGwSXE+Id}73`ZB->BZ)-y}{-ZZ-RzwoPYjsQn~s^{PHo ziiFk(i5*$t+{gnDv+oz4v9oHi!#RP*S*NEozIlJt_(>ZQ-rw>J6C~IsMoEJFM0Hf7FjUII~IiUqjbc2`xG< z^u~m!jnxKKs`lUyxry`Z?%6QyK;F$sGiO}tm%FWAXwl&_;%;9|xcsO1gzTzY`|sPH z^I5n>PC@xT=0+)y{MB&Ss3bIwdvr zMu+t2ElU4oTd6AF?ity=LPTbxwAN9z+NQN#)wS#9^KmKlTPL*%{17#wW63?sy1ckP z=eH;8tUo#GW=eF-E9-Z?e01}_Qa)Pz&&6B1jw!ygef&!eDn>M~Hm1VPxqmBrao3vb ztE&usJN)$z#$MR^YMYHC)|9-v=+mb~_g>mtVsP_A(=YD3-DlL;vG3=6-Ky}Tih)2d On0I0G{$d3Kf&T(Z3-iwa literal 0 HcmV?d00001
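
The sessionlog.go change in the third patch leans on the standard library's gzip multistream toggle. As a rough standalone sketch of that behavior (plain Go, not Teleport code; the zero-byte padding below is an assumption standing in for the padding described in the commit), a reader with Multistream(false) stops cleanly at the end of the only valid gzip member instead of failing on the dangling bytes that follow it:

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

func main() {
	// One valid gzip member followed by dangling padding, loosely mimicking the
	// corrupted recordings described above (zero bytes are an assumption here).
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte("session events"))
	_ = zw.Close()
	buf.Write(make([]byte, 16)) // padding after the gzip member

	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	// With multistream left enabled (the default), the reader would try to parse
	// the padding as another gzip header and return an error; disabling it makes
	// the reader halt at the end of the first (only) member.
	zr.Multistream(false)

	payload, err := io.ReadAll(zr)
	fmt.Printf("payload=%q err=%v\n", payload, err) // payload="session events" err=<nil>
}

The ProtoReader change in the same patch then drains whatever padding remains on the underlying reader (io.CopyBuffer into io.Discard) before moving on to the next part of the recording.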