From 6f953837fa4405d64f4db8085a698e746909f796 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Sat, 1 Jun 2019 22:42:23 -0300 Subject: [PATCH 1/6] Refactor TF and TFLite model implementations into their own classes/files --- native_client/BUILD | 13 +- native_client/deepspeech.cc | 713 ++++-------------------------- native_client/deepspeech.h | 1 + native_client/modelstate.cc | 81 ++++ native_client/modelstate.h | 88 ++++ native_client/tflitemodelstate.cc | 258 +++++++++++ native_client/tflitemodelstate.h | 51 +++ native_client/tfmodelstate.cc | 214 +++++++++ native_client/tfmodelstate.h | 37 ++ 9 files changed, 819 insertions(+), 637 deletions(-) create mode 100644 native_client/modelstate.cc create mode 100644 native_client/modelstate.h create mode 100644 native_client/tflitemodelstate.cc create mode 100644 native_client/tflitemodelstate.h create mode 100644 native_client/tfmodelstate.cc create mode 100644 native_client/tfmodelstate.h diff --git a/native_client/BUILD b/native_client/BUILD index bf4e1d2654..d7813d297f 100644 --- a/native_client/BUILD +++ b/native_client/BUILD @@ -70,9 +70,20 @@ tf_cc_shared_object( srcs = ["deepspeech.cc", "deepspeech.h", "alphabet.h", + "modelstate.h", + "modelstate.cc", "ds_version.h", "ds_graph_version.h"] + - DECODER_SOURCES, + DECODER_SOURCES + + select({ + "//native_client:tflite": [ + "tflitemodelstate.h", + "tflitemodelstate.cc" + ], + "//conditions:default": [ + "tfmodelstate.h", + "tfmodelstate.cc" + ]}), copts = select({ # -fvisibility=hidden is not required on Windows, MSCV hides all declarations by default "//tensorflow:windows": ["/w"], diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 74d83e5649..9955cf861c 100644 --- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -11,17 +11,14 @@ #include "deepspeech.h" #include "alphabet.h" +#include "modelstate.h" #include "native_client/ds_version.h" -#include "native_client/ds_graph_version.h" #ifndef USE_TFLITE - #include "tensorflow/core/public/session.h" - #include "tensorflow/core/platform/env.h" - #include "tensorflow/core/util/memmapped_file_system.h" -#else // USE_TFLITE - #include "tensorflow/lite/model.h" - #include "tensorflow/lite/kernels/register.h" +#include "tfmodelstate.h" +#else +#include "tflitemodelstate.h" #endif // USE_TFLITE #include "ctcdecode/ctc_beam_search_decoder.h" @@ -36,23 +33,9 @@ #define LOGE(...) #endif // __ANDROID__ -//TODO: infer batch size from model/use dynamic batch size -constexpr unsigned int BATCH_SIZE = 1; - -constexpr unsigned int DEFAULT_SAMPLE_RATE = 16000; -constexpr unsigned int DEFAULT_WINDOW_LENGTH = DEFAULT_SAMPLE_RATE * 0.032; -constexpr unsigned int DEFAULT_WINDOW_STEP = DEFAULT_SAMPLE_RATE * 0.02; - -#ifndef USE_TFLITE - using namespace tensorflow; -#else - using namespace tflite; -#endif - using std::vector; -/* This is the actual implementation of the streaming inference API, with the - Model class just forwarding the calls to this class. +/* This is the implementation of the streaming inference API. The streaming process uses three buffers that are fed eagerly as audio data is fed in. The buffers only hold the minimum amount of data needed to do a @@ -75,17 +58,20 @@ using std::vector; API. When audio_buffer is full, features are computed from it and pushed to mfcc_buffer. When mfcc_buffer is full, the timestep is copied to batch_buffer. When batch_buffer is full, we do a single step through the acoustic model - and accumulate results in the DecoderState structure. 
+   and accumulate the intermediate decoding state in the DecoderState structure.
 
-   When finishStream() is called, we decode the accumulated logits and return
-   the corresponding transcription.
+   When finishStream() is called, we return the corresponding transcription from
+   the current decoder state.
 */
 struct StreamingState {
-  vector<float> audio_buffer;
-  vector<float> mfcc_buffer;
-  vector<float> batch_buffer;
-  ModelState* model;
-  std::unique_ptr<DecoderState> decoder_state;
+  vector<float> audio_buffer_;
+  vector<float> mfcc_buffer_;
+  vector<float> batch_buffer_;
+  ModelState* model_;
+  std::unique_ptr<DecoderState> decoder_state_;
+
+  StreamingState();
+  ~StreamingState();
 
   void feedAudioContent(const short* buffer, unsigned int buffer_size);
   char* intermediateDecode();
@@ -100,133 +86,12 @@ struct StreamingState {
   void processBatch(const vector<float>& buf, unsigned int n_steps);
 };
 
-struct ModelState {
-#ifndef USE_TFLITE
-  MemmappedEnv* mmap_env;
-  Session* session;
-  GraphDef graph_def;
-#else // USE_TFLITE
-  std::unique_ptr<Interpreter> interpreter;
-  std::unique_ptr<FlatBufferModel> fbmodel;
-#endif // USE_TFLITE
-  unsigned int ncep;
-  unsigned int ncontext;
-  Alphabet* alphabet;
-  Scorer* scorer;
-  unsigned int beam_width;
-  unsigned int n_steps;
-  unsigned int n_context;
-  unsigned int n_features;
-  unsigned int mfcc_feats_per_timestep;
-  unsigned int sample_rate;
-  unsigned int audio_win_len;
-  unsigned int audio_win_step;
-
-#ifdef USE_TFLITE
-  size_t previous_state_size;
-  std::unique_ptr<float[]> previous_state_c_;
-  std::unique_ptr<float[]> previous_state_h_;
-
-  int input_node_idx;
-  int previous_state_c_idx;
-  int previous_state_h_idx;
-  int input_samples_idx;
-
-  int logits_idx;
-  int new_state_c_idx;
-  int new_state_h_idx;
-  int mfccs_idx;
-
-  std::vector<int> acoustic_exec_plan;
-  std::vector<int> mfcc_exec_plan;
-#endif
-
-  ModelState();
-  ~ModelState();
-
-  /**
-   * @brief Perform decoding of the logits, using basic CTC decoder or
-   *        CTC decoder with KenLM enabled
-   *
-   * @return String representing the decoded text.
-   */
-  char* decode(DecoderState* state);
-
-  /**
-   * @brief Perform decoding of the logits, using basic CTC decoder or
-   *        CTC decoder with KenLM enabled
-   *
-   * @return Vector of Output structs directly from the CTC decoder for additional processing.
-   */
-  vector<Output> decode_raw(DecoderState* state);
-
-  /**
-   * @brief Return character-level metadata including letter timings.
-   *
-   * @return Metadata struct containing MetadataItem structs for each character.
-   *         The user is responsible for freeing Metadata by calling DS_FreeMetadata().
-   */
-  Metadata* decode_metadata(DecoderState* state);
-
-  /**
-   * @brief Do a single inference step in the acoustic model, with:
-   *          input=mfcc
-   *          input_lengths=[n_frames]
-   *
-   * @param mfcc batch input data
-   * @param n_frames number of timesteps in the data
-   *
-   * @param[out] output_logits Where to store computed logits.
- */ - void infer(const float* mfcc, unsigned int n_frames, vector& logits_output); - - void compute_mfcc(const vector& audio_buffer, vector& mfcc_output); -}; - -ModelState::ModelState() - : -#ifndef USE_TFLITE - mmap_env(nullptr) - , session(nullptr) -#else // USE_TFLITE - interpreter(nullptr) - , fbmodel(nullptr) -#endif // USE_TFLITE - , ncep(0) - , ncontext(0) - , alphabet(nullptr) - , scorer(nullptr) - , beam_width(0) - , n_steps(-1) - , n_context(-1) - , n_features(-1) - , mfcc_feats_per_timestep(-1) - , sample_rate(DEFAULT_SAMPLE_RATE) - , audio_win_len(DEFAULT_WINDOW_LENGTH) - , audio_win_step(DEFAULT_WINDOW_STEP) -#ifdef USE_TFLITE - , previous_state_size(0) - , previous_state_c_(nullptr) - , previous_state_h_(nullptr) -#endif +StreamingState::StreamingState() { } -ModelState::~ModelState() +StreamingState::~StreamingState() { -#ifndef USE_TFLITE - if (session) { - Status status = session->Close(); - if (!status.ok()) { - std::cerr << "Error closing TensorFlow session: " << status << std::endl; - } - } - delete mmap_env; -#endif // USE_TFLITE - - delete scorer; - delete alphabet; } template @@ -243,19 +108,19 @@ StreamingState::feedAudioContent(const short* buffer, { // Consume all the data that was passed in, processing full buffers if needed while (buffer_size > 0) { - while (buffer_size > 0 && audio_buffer.size() < model->audio_win_len) { + while (buffer_size > 0 && audio_buffer_.size() < model_->audio_win_len_) { // Convert i16 sample into f32 float multiplier = 1.0f / (1 << 15); - audio_buffer.push_back((float)(*buffer) * multiplier); + audio_buffer_.push_back((float)(*buffer) * multiplier); ++buffer; --buffer_size; } // If the buffer is full, process and shift it - if (audio_buffer.size() == model->audio_win_len) { - processAudioWindow(audio_buffer); + if (audio_buffer_.size() == model_->audio_win_len_) { + processAudioWindow(audio_buffer_); // Shift data by one step - shift_buffer_left(audio_buffer, model->audio_win_step); + shift_buffer_left(audio_buffer_, model_->audio_win_step_); } // Repeat until buffer empty @@ -265,21 +130,21 @@ StreamingState::feedAudioContent(const short* buffer, char* StreamingState::intermediateDecode() { - return model->decode(decoder_state.get()); + return model_->decode(decoder_state_.get()); } char* StreamingState::finishStream() { finalizeStream(); - return model->decode(decoder_state.get()); + return model_->decode(decoder_state_.get()); } Metadata* StreamingState::finishStreamWithMetadata() { finalizeStream(); - return model->decode_metadata(decoder_state.get()); + return model_->decode_metadata(decoder_state_.get()); } void @@ -287,8 +152,8 @@ StreamingState::processAudioWindow(const vector& buf) { // Compute MFCC features vector mfcc; - mfcc.reserve(model->n_features); - model->compute_mfcc(buf, mfcc); + mfcc.reserve(model_->n_features_); + model_->compute_mfcc(buf, mfcc); pushMfccBuffer(mfcc); } @@ -296,23 +161,23 @@ void StreamingState::finalizeStream() { // Flush audio buffer - processAudioWindow(audio_buffer); + processAudioWindow(audio_buffer_); // Add empty mfcc vectors at end of sample - for (int i = 0; i < model->n_context; ++i) { + for (int i = 0; i < model_->n_context_; ++i) { addZeroMfccWindow(); } // Process final batch - if (batch_buffer.size() > 0) { - processBatch(batch_buffer, batch_buffer.size()/model->mfcc_feats_per_timestep); + if (batch_buffer_.size() > 0) { + processBatch(batch_buffer_, batch_buffer_.size()/model_->mfcc_feats_per_timestep_); } } void StreamingState::addZeroMfccWindow() { - vector 
zero_buffer(model->n_features, 0.f); + vector zero_buffer(model_->n_features_, 0.f); pushMfccBuffer(zero_buffer); } @@ -332,15 +197,15 @@ StreamingState::pushMfccBuffer(const vector& buf) auto end = buf.end(); while (start != end) { // Copy from input buffer to mfcc_buffer, stopping if we have a full context window - start = copy_up_to_n(start, end, std::back_inserter(mfcc_buffer), - model->mfcc_feats_per_timestep - mfcc_buffer.size()); - assert(mfcc_buffer.size() <= model->mfcc_feats_per_timestep); + start = copy_up_to_n(start, end, std::back_inserter(mfcc_buffer_), + model_->mfcc_feats_per_timestep_ - mfcc_buffer_.size()); + assert(mfcc_buffer_.size() <= model_->mfcc_feats_per_timestep_); // If we have a full context window - if (mfcc_buffer.size() == model->mfcc_feats_per_timestep) { - processMfccWindow(mfcc_buffer); + if (mfcc_buffer_.size() == model_->mfcc_feats_per_timestep_) { + processMfccWindow(mfcc_buffer_); // Shift data by one step of one mfcc feature vector - shift_buffer_left(mfcc_buffer, model->n_features); + shift_buffer_left(mfcc_buffer_, model_->n_features_); } } } @@ -352,14 +217,14 @@ StreamingState::processMfccWindow(const vector& buf) auto end = buf.end(); while (start != end) { // Copy from input buffer to batch_buffer, stopping if we have a full batch - start = copy_up_to_n(start, end, std::back_inserter(batch_buffer), - model->n_steps * model->mfcc_feats_per_timestep - batch_buffer.size()); - assert(batch_buffer.size() <= model->n_steps * model->mfcc_feats_per_timestep); + start = copy_up_to_n(start, end, std::back_inserter(batch_buffer_), + model_->n_steps_ * model_->mfcc_feats_per_timestep_ - batch_buffer_.size()); + assert(batch_buffer_.size() <= model_->n_steps_ * model_->mfcc_feats_per_timestep_); // If we have a full batch - if (batch_buffer.size() == model->n_steps * model->mfcc_feats_per_timestep) { - processBatch(batch_buffer, model->n_steps); - batch_buffer.resize(0); + if (batch_buffer_.size() == model_->n_steps_ * model_->mfcc_feats_per_timestep_) { + processBatch(batch_buffer_, model_->n_steps_); + batch_buffer_.resize(0); } } } @@ -368,272 +233,27 @@ void StreamingState::processBatch(const vector& buf, unsigned int n_steps) { vector logits; - model->infer(buf.data(), n_steps, logits); - + model_->infer(buf.data(), n_steps, logits); + const int cutoff_top_n = 40; const double cutoff_prob = 1.0; - const size_t num_classes = model->alphabet->GetSize() + 1; // +1 for blank - const int n_frames = logits.size() / (BATCH_SIZE * num_classes); + const size_t num_classes = model_->alphabet_->GetSize() + 1; // +1 for blank + const int n_frames = logits.size() / (ModelState::BATCH_SIZE * num_classes); // Convert logits to double vector inputs(logits.begin(), logits.end()); decoder_next(inputs.data(), - *model->alphabet, - decoder_state.get(), + *model_->alphabet_, + decoder_state_.get(), n_frames, num_classes, cutoff_prob, cutoff_top_n, - model->beam_width, - model->scorer); -} - -void -ModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) -{ - const size_t num_classes = alphabet->GetSize() + 1; // +1 for blank - -#ifndef USE_TFLITE - Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps, 2*n_context+1, n_features})); - - auto input_mapped = input.flat(); - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep; ++i) { - input_mapped(i) = aMfcc[i]; - } - for (; i < n_steps*mfcc_feats_per_timestep; ++i) { - input_mapped(i) = 0.; - } - - Tensor input_lengths(DT_INT32, TensorShape({1})); - input_lengths.scalar()() = n_frames; - - 
vector outputs; - Status status = session->Run( - {{"input_node", input}, {"input_lengths", input_lengths}}, - {"logits"}, {}, &outputs); - - if (!status.ok()) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - auto logits_mapped = outputs[0].flat(); - // The CTCDecoder works with log-probs. - for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(logits_mapped(t)); - } -#else // USE_TFLITE - // Feeding input_node - float* input_node = interpreter->typed_tensor(input_node_idx); - { - int i; - for (i = 0; i < n_frames*mfcc_feats_per_timestep; ++i) { - input_node[i] = aMfcc[i]; - } - for (; i < n_steps*mfcc_feats_per_timestep; ++i) { - input_node[i] = 0; - } - } - - assert(previous_state_size > 0); - - // Feeding previous_state_c, previous_state_h - memcpy(interpreter->typed_tensor(previous_state_c_idx), previous_state_c_.get(), sizeof(float) * previous_state_size); - memcpy(interpreter->typed_tensor(previous_state_h_idx), previous_state_h_.get(), sizeof(float) * previous_state_size); - - interpreter->SetExecutionPlan(acoustic_exec_plan); - TfLiteStatus status = interpreter->Invoke(); - if (status != kTfLiteOk) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - float* outputs = interpreter->typed_tensor(logits_idx); - - // The CTCDecoder works with log-probs. - for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { - logits_output.push_back(outputs[t]); - } - - memcpy(previous_state_c_.get(), interpreter->typed_tensor(new_state_c_idx), sizeof(float) * previous_state_size); - memcpy(previous_state_h_.get(), interpreter->typed_tensor(new_state_h_idx), sizeof(float) * previous_state_size); -#endif // USE_TFLITE + model_->beam_width_, + model_->scorer_); } -void -ModelState::compute_mfcc(const vector& samples, vector& mfcc_output) -{ -#ifndef USE_TFLITE - Tensor input(DT_FLOAT, TensorShape({audio_win_len})); - auto input_mapped = input.flat(); - int i; - for (i = 0; i < samples.size(); ++i) { - input_mapped(i) = samples[i]; - } - for (; i < audio_win_len; ++i) { - input_mapped(i) = 0.f; - } - - vector outputs; - Status status = session->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs); - - if (!status.ok()) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - // The feature computation graph is hardcoded to one audio length for now - const int n_windows = 1; - assert(outputs[0].shape().num_elemements() / n_features == n_windows); - - auto mfcc_mapped = outputs[0].flat(); - for (int i = 0; i < n_windows * n_features; ++i) { - mfcc_output.push_back(mfcc_mapped(i)); - } -#else - // Feeding input_node - float* input_samples = interpreter->typed_tensor(input_samples_idx); - for (int i = 0; i < samples.size(); ++i) { - input_samples[i] = samples[i]; - } - - interpreter->SetExecutionPlan(mfcc_exec_plan); - TfLiteStatus status = interpreter->Invoke(); - if (status != kTfLiteOk) { - std::cerr << "Error running session: " << status << "\n"; - return; - } - - // The feature computation graph is hardcoded to one audio length for now - int n_windows = 1; - TfLiteIntArray* out_dims = interpreter->tensor(mfccs_idx)->dims; - int num_elements = 1; - for (int i = 0; i < out_dims->size; ++i) { - num_elements *= out_dims->data[i]; - } - assert(num_elements / n_features == n_windows); - - float* outputs = interpreter->typed_tensor(mfccs_idx); - for (int i = 0; i < n_windows * n_features; ++i) { - mfcc_output.push_back(outputs[i]); - } -#endif -} - -char* 
-ModelState::decode(DecoderState* state) -{ - vector out = ModelState::decode_raw(state); - return strdup(alphabet->LabelsToString(out[0].tokens).c_str()); -} - -vector -ModelState::decode_raw(DecoderState* state) -{ - vector out = decoder_decode(state, *alphabet, beam_width, scorer); - - return out; -} - -Metadata* -ModelState::decode_metadata(DecoderState* state) -{ - vector out = decode_raw(state); - - std::unique_ptr metadata(new Metadata()); - metadata->num_items = out[0].tokens.size(); - metadata->probability = out[0].probability; - - std::unique_ptr items(new MetadataItem[metadata->num_items]()); - - // Loop through each character - for (int i = 0; i < out[0].tokens.size(); ++i) { - items[i].character = strdup(alphabet->StringFromLabel(out[0].tokens[i]).c_str()); - items[i].timestep = out[0].timesteps[i]; - items[i].start_time = out[0].timesteps[i] * ((float)audio_win_step / sample_rate); - - if (items[i].start_time < 0) { - items[i].start_time = 0; - } - } - - metadata->items = items.release(); - return metadata.release(); -} - -#ifdef USE_TFLITE -int -tflite_get_tensor_by_name(const ModelState* ctx, const vector& list, const char* name) -{ - int rv = -1; - - for (int i = 0; i < list.size(); ++i) { - const string& node_name = ctx->interpreter->tensor(list[i])->name; - if (node_name.compare(string(name)) == 0) { - rv = i; - } - } - - assert(rv >= 0); - return rv; -} - -int -tflite_get_input_tensor_by_name(const ModelState* ctx, const char* name) -{ - return ctx->interpreter->inputs()[tflite_get_tensor_by_name(ctx, ctx->interpreter->inputs(), name)]; -} - -int -tflite_get_output_tensor_by_name(const ModelState* ctx, const char* name) -{ - return ctx->interpreter->outputs()[tflite_get_tensor_by_name(ctx, ctx->interpreter->outputs(), name)]; -} - -void push_back_if_not_present(std::deque& list, int value) { - if (std::find(list.begin(), list.end(), value) == list.end()) { - list.push_back(value); - } -} - -// Backwards BFS on the node DAG. At each iteration we get the next tensor id -// from the frontier list, then for each node which has that tensor id as an -// output, add it to the parent list, and add its input tensors to the frontier -// list. Because we start from the final tensor and work backwards to the inputs, -// the parents list is constructed in reverse, adding elements to its front. 
-std::vector -tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) -{ - std::deque parents; - std::deque frontier; - frontier.push_back(tensor_id); - while (!frontier.empty()) { - int next_tensor_id = frontier.front(); - frontier.pop_front(); - // Find all nodes that have next_tensor_id as an output - for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) { - TfLiteNode node = interpreter->node_and_registration(node_id)->first; - // Search node outputs for the tensor we're looking for - for (int i = 0; i < node.outputs->size; ++i) { - if (node.outputs->data[i] == next_tensor_id) { - // This node is part of the parent tree, add it to the parent list and - // add its input tensors to the frontier list - parents.push_front(node_id); - for (int j = 0; j < node.inputs->size; ++j) { - push_back_if_not_present(frontier, node.inputs->data[j]); - } - } - } - } - } - - return std::vector(parents.begin(), parents.end()); -} - -#endif - int DS_CreateModel(const char* aModelPath, unsigned int aNCep, @@ -642,15 +262,6 @@ DS_CreateModel(const char* aModelPath, unsigned int aBeamWidth, ModelState** retval) { - std::unique_ptr model(new ModelState()); -#ifndef USE_TFLITE - model->mmap_env = new MemmappedEnv(Env::Default()); -#endif // USE_TFLITE - model->ncep = aNCep; - model->ncontext = aNContext; - model->alphabet = new Alphabet(aAlphabetConfigPath); - model->beam_width = aBeamWidth; - *retval = nullptr; DS_PrintVersions(); @@ -661,182 +272,23 @@ DS_CreateModel(const char* aModelPath, } #ifndef USE_TFLITE - Status status; - SessionOptions options; - - bool is_mmap = std::string(aModelPath).find(".pbmm") != std::string::npos; - if (!is_mmap) { - std::cerr << "Warning: reading entire model file into memory. Transform model file into an mmapped graph to reduce heap usage." << std::endl; - } else { - status = model->mmap_env->InitializeFromFile(aModelPath); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_INIT_MMAP; - } - - options.config.mutable_graph_options() - ->mutable_optimizer_options() - ->set_opt_level(::OptimizerOptions::L0); - options.env = model->mmap_env; - } - - status = NewSession(options, &model->session); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_INIT_SESS; - } - - if (is_mmap) { - status = ReadBinaryProto(model->mmap_env, - MemmappedFileSystem::kMemmappedPackageDefaultGraphDef, - &model->graph_def); - } else { - status = ReadBinaryProto(Env::Default(), aModelPath, &model->graph_def); - } - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_READ_PROTOBUF; - } - - status = model->session->Create(model->graph_def); - if (!status.ok()) { - std::cerr << status << std::endl; - return DS_ERR_FAIL_CREATE_SESS; - } - - int graph_version = model->graph_def.version(); - if (graph_version < DS_GRAPH_VERSION) { - std::cerr << "Specified model file version (" << graph_version << ") is " - << "incompatible with minimum version supported by this client (" - << DS_GRAPH_VERSION << "). 
See " - << "https://github.com/mozilla/DeepSpeech/#model-compatibility " - << "for more information" << std::endl; - return DS_ERR_MODEL_INCOMPATIBLE; - } - - for (int i = 0; i < model->graph_def.node_size(); ++i) { - NodeDef node = model->graph_def.node(i); - if (node.name() == "input_node") { - const auto& shape = node.attr().at("shape").shape(); - model->n_steps = shape.dim(1).size(); - model->n_context = (shape.dim(2).size()-1)/2; - model->n_features = shape.dim(3).size(); - model->mfcc_feats_per_timestep = shape.dim(2).size() * shape.dim(3).size(); - } else if (node.name() == "logits_shape") { - Tensor logits_shape = Tensor(DT_INT32, TensorShape({3})); - if (!logits_shape.FromProto(node.attr().at("value").tensor())) { - continue; - } - - int final_dim_size = logits_shape.vec()(2) - 1; - if (final_dim_size != model->alphabet->GetSize()) { - std::cerr << "Error: Alphabet size does not match loaded model: alphabet " - << "has size " << model->alphabet->GetSize() - << ", but model has " << final_dim_size - << " classes in its output. Make sure you're passing an alphabet " - << "file with the same size as the one used for training." - << std::endl; - return DS_ERR_INVALID_ALPHABET; - } - } else if (node.name() == "model_metadata") { - int sample_rate = node.attr().at("sample_rate").i(); - model->sample_rate = sample_rate; - int win_len_ms = node.attr().at("feature_win_len").i(); - int win_step_ms = node.attr().at("feature_win_step").i(); - model->audio_win_len = sample_rate * (win_len_ms / 1000.0); - model->audio_win_step = sample_rate * (win_step_ms / 1000.0); - } - } - - if (model->n_context == -1 || model->n_features == -1) { - std::cerr << "Error: Could not infer input shape from model file. " - << "Make sure input_node is a 4D tensor with shape " - << "[batch_size=1, time, window_size, n_features]." - << std::endl; - return DS_ERR_INVALID_SHAPE; - } - - *retval = model.release(); - return DS_ERR_OK; -#else // USE_TFLITE - model->fbmodel = tflite::FlatBufferModel::BuildFromFile(aModelPath); - if (!model->fbmodel) { - std::cerr << "Error at reading model file " << aModelPath << std::endl; - return DS_ERR_FAIL_INIT_MMAP; - } - + std::unique_ptr model(new TFModelState()); +#else + std::unique_ptr model(new TFLiteModelState()); +#endif // USE_TFLITE - tflite::ops::builtin::BuiltinOpResolver resolver; - tflite::InterpreterBuilder(*model->fbmodel, resolver)(&model->interpreter); - if (!model->interpreter) { - std::cerr << "Error at InterpreterBuilder for model file " << aModelPath << std::endl; - return DS_ERR_FAIL_INTERPRETER; + if (!model) { + std::cerr << "Could not allocate model state." 
<< std::endl; + return DS_ERR_FAIL_CREATE_MODEL; } - model->interpreter->AllocateTensors(); - model->interpreter->SetNumThreads(4); - - // Query all the index once - model->input_node_idx = tflite_get_input_tensor_by_name(model.get(), "input_node"); - model->previous_state_c_idx = tflite_get_input_tensor_by_name(model.get(), "previous_state_c"); - model->previous_state_h_idx = tflite_get_input_tensor_by_name(model.get(), "previous_state_h"); - model->input_samples_idx = tflite_get_input_tensor_by_name(model.get(), "input_samples"); - model->logits_idx = tflite_get_output_tensor_by_name(model.get(), "logits"); - model->new_state_c_idx = tflite_get_output_tensor_by_name(model.get(), "new_state_c"); - model->new_state_h_idx = tflite_get_output_tensor_by_name(model.get(), "new_state_h"); - model->mfccs_idx = tflite_get_output_tensor_by_name(model.get(), "mfccs"); - - // When we call Interpreter::Invoke, the whole graph is executed by default, - // which means every time compute_mfcc is called the entire acoustic model is - // also executed. To workaround that problem, we walk up the dependency DAG - // from the mfccs output tensor to find all the relevant nodes required for - // feature computation, building an execution plan that runs just those nodes. - auto mfcc_plan = tflite_find_parent_node_ids(model->interpreter.get(), model->mfccs_idx); - auto orig_plan = model->interpreter->execution_plan(); - - // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan - auto erase_begin = std::remove_if(orig_plan.begin(), orig_plan.end(), [&mfcc_plan](int elem) { - return std::find(mfcc_plan.begin(), mfcc_plan.end(), elem) != mfcc_plan.end(); - }); - orig_plan.erase(erase_begin, orig_plan.end()); - - model->acoustic_exec_plan = std::move(orig_plan); - model->mfcc_exec_plan = std::move(mfcc_plan); - - TfLiteIntArray* dims_input_node = model->interpreter->tensor(model->input_node_idx)->dims; - - model->n_steps = dims_input_node->data[1]; - model->n_context = (dims_input_node->data[2] - 1 ) / 2; - model->n_features = dims_input_node->data[3]; - model->mfcc_feats_per_timestep = dims_input_node->data[2] * dims_input_node->data[3]; - - TfLiteIntArray* dims_logits = model->interpreter->tensor(model->logits_idx)->dims; - const int final_dim_size = dims_logits->data[1] - 1; - if (final_dim_size != model->alphabet->GetSize()) { - std::cerr << "Error: Alphabet size does not match loaded model: alphabet " - << "has size " << model->alphabet->GetSize() - << ", but model has " << final_dim_size - << " classes in its output. Make sure you're passing an alphabet " - << "file with the same size as the one used for training." 
- << std::endl; - return DS_ERR_INVALID_ALPHABET; + int err = model->init(aModelPath, aNCep, aNContext, aAlphabetConfigPath, aBeamWidth); + if (err != DS_ERR_OK) { + return err; } - TfLiteIntArray* dims_c = model->interpreter->tensor(model->previous_state_c_idx)->dims; - TfLiteIntArray* dims_h = model->interpreter->tensor(model->previous_state_h_idx)->dims; - assert(dims_c->data[1] == dims_h->data[1]); - - model->previous_state_size = dims_c->data[1]; - model->previous_state_c_.reset(new float[model->previous_state_size]()); - model->previous_state_h_.reset(new float[model->previous_state_size]()); - - // Set initial values for previous_state_c and previous_state_h - memset(model->previous_state_c_.get(), 0, sizeof(float) * model->previous_state_size); - memset(model->previous_state_h_.get(), 0, sizeof(float) * model->previous_state_size); - *retval = model.release(); return DS_ERR_OK; -#endif // USE_TFLITE } void @@ -854,10 +306,10 @@ DS_EnableDecoderWithLM(ModelState* aCtx, float aLMBeta) { try { - aCtx->scorer = new Scorer(aLMAlpha, aLMBeta, - aLMPath ? aLMPath : "", - aTriePath ? aTriePath : "", - *aCtx->alphabet); + aCtx->scorer_ = new Scorer(aLMAlpha, aLMBeta, + aLMPath ? aLMPath : "", + aTriePath ? aTriePath : "", + *aCtx->alphabet_); return DS_ERR_OK; } catch (...) { return DS_ERR_INVALID_LM; @@ -872,13 +324,10 @@ DS_SetupStream(ModelState* aCtx, { *retval = nullptr; -#ifndef USE_TFLITE - Status status = aCtx->session->Run({}, {}, {"initialize_state"}, nullptr); - if (!status.ok()) { - std::cerr << "Error running session: " << status << std::endl; - return DS_ERR_FAIL_RUN_SESS; + int err = aCtx->initialize_state(); + if (err != DS_ERR_OK) { + return err; } -#endif // USE_TFLITE std::unique_ptr ctx(new StreamingState()); if (!ctx) { @@ -886,27 +335,20 @@ DS_SetupStream(ModelState* aCtx, return DS_ERR_FAIL_CREATE_STREAM; } - const size_t num_classes = aCtx->alphabet->GetSize() + 1; // +1 for blank + const size_t num_classes = aCtx->alphabet_->GetSize() + 1; // +1 for blank // Default initial allocation = 3 seconds. 
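   // (aPreAllocFrames counts timesteps: 150 frames * 20 ms default window step = 3 s)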
if (aPreAllocFrames == 0) { aPreAllocFrames = 150; } - ctx->audio_buffer.reserve(aCtx->audio_win_len); - ctx->mfcc_buffer.reserve(aCtx->mfcc_feats_per_timestep); - ctx->mfcc_buffer.resize(aCtx->n_features*aCtx->n_context, 0.f); - ctx->batch_buffer.reserve(aCtx->n_steps * aCtx->mfcc_feats_per_timestep); + ctx->audio_buffer_.reserve(aCtx->audio_win_len_); + ctx->mfcc_buffer_.reserve(aCtx->mfcc_feats_per_timestep_); + ctx->mfcc_buffer_.resize(aCtx->n_features_*aCtx->n_context_, 0.f); + ctx->batch_buffer_.reserve(aCtx->n_steps_ * aCtx->mfcc_feats_per_timestep_); + ctx->model_ = aCtx; - ctx->model = aCtx; - -#ifdef USE_TFLITE - /* Ensure previous_state_{c,h} are not holding previous stream value */ - memset(ctx->model->previous_state_c_.get(), 0, sizeof(float) * ctx->model->previous_state_size); - memset(ctx->model->previous_state_h_.get(), 0, sizeof(float) * ctx->model->previous_state_size); -#endif // USE_TFLITE - - ctx->decoder_state.reset(decoder_init(*aCtx->alphabet, num_classes, aCtx->scorer)); + ctx->decoder_state_.reset(decoder_init(*aCtx->alphabet_, num_classes, aCtx->scorer_)); *retval = ctx.release(); return DS_ERR_OK; @@ -1012,4 +454,3 @@ DS_PrintVersions() { LOGD("DeepSpeech: %s", ds_git_version()); #endif } - diff --git a/native_client/deepspeech.h b/native_client/deepspeech.h index 2f4637ceeb..b40da6065a 100644 --- a/native_client/deepspeech.h +++ b/native_client/deepspeech.h @@ -52,6 +52,7 @@ enum DeepSpeech_Error_Codes DS_ERR_FAIL_CREATE_STREAM = 0x3004, DS_ERR_FAIL_READ_PROTOBUF = 0x3005, DS_ERR_FAIL_CREATE_SESS = 0x3006, + DS_ERR_FAIL_CREATE_MODEL = 0x3007, }; /** diff --git a/native_client/modelstate.cc b/native_client/modelstate.cc new file mode 100644 index 0000000000..c3fda2b938 --- /dev/null +++ b/native_client/modelstate.cc @@ -0,0 +1,81 @@ +#include + +#include "ctcdecode/ctc_beam_search_decoder.h" + +#include "modelstate.h" + +using std::vector; + +ModelState::ModelState() + : alphabet_(nullptr) + , scorer_(nullptr) + , beam_width_(-1) + , n_steps_(-1) + , n_context_(-1) + , n_features_(-1) + , mfcc_feats_per_timestep_(-1) + , sample_rate_(DEFAULT_SAMPLE_RATE) + , audio_win_len_(DEFAULT_WINDOW_LENGTH) + , audio_win_step_(DEFAULT_WINDOW_STEP) +{ +} + +ModelState::~ModelState() +{ + delete scorer_; + delete alphabet_; +} + +int +ModelState::init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) +{ + n_features_ = n_features; + n_context_ = n_context; + alphabet_ = new Alphabet(alphabet_path); + beam_width_ = beam_width; + return DS_ERR_OK; +} + +vector +ModelState::decode_raw(DecoderState* state) +{ + vector out = decoder_decode(state, *alphabet_, beam_width_, scorer_); + return out; +} + +char* +ModelState::decode(DecoderState* state) +{ + vector out = decode_raw(state); + return strdup(alphabet_->LabelsToString(out[0].tokens).c_str()); +} + +Metadata* +ModelState::decode_metadata(DecoderState* state) +{ + vector out = decode_raw(state); + + std::unique_ptr metadata(new Metadata()); + metadata->num_items = out[0].tokens.size(); + metadata->probability = out[0].probability; + + std::unique_ptr items(new MetadataItem[metadata->num_items]()); + + // Loop through each character + for (int i = 0; i < out[0].tokens.size(); ++i) { + items[i].character = strdup(alphabet_->StringFromLabel(out[0].tokens[i]).c_str()); + items[i].timestep = out[0].timesteps[i]; + items[i].start_time = out[0].timesteps[i] * ((float)audio_win_step_ / sample_rate_); + + if (items[i].start_time < 0) { + 
+      items[i].start_time = 0;
+    }
+  }
+
+  metadata->items = items.release();
+  return metadata.release();
+}
diff --git a/native_client/modelstate.h b/native_client/modelstate.h
new file mode 100644
index 0000000000..7f53c63e62
--- /dev/null
+++ b/native_client/modelstate.h
@@ -0,0 +1,88 @@
+#ifndef MODELSTATE_H
+#define MODELSTATE_H
+
+#include <vector>
+
+#include "deepspeech.h"
+#include "alphabet.h"
+
+#include "ctcdecode/scorer.h"
+#include "ctcdecode/output.h"
+#include "ctcdecode/decoderstate.h"
+
+struct ModelState {
+  //TODO: infer batch size from model/use dynamic batch size
+  static constexpr unsigned int BATCH_SIZE = 1;
+
+  static constexpr unsigned int DEFAULT_SAMPLE_RATE = 16000;
+  static constexpr unsigned int DEFAULT_WINDOW_LENGTH = DEFAULT_SAMPLE_RATE * 0.032;
+  static constexpr unsigned int DEFAULT_WINDOW_STEP = DEFAULT_SAMPLE_RATE * 0.02;
+
+  Alphabet* alphabet_;
+  Scorer* scorer_;
+  unsigned int beam_width_;
+  unsigned int n_steps_;
+  unsigned int n_context_;
+  unsigned int n_features_;
+  unsigned int mfcc_feats_per_timestep_;
+  unsigned int sample_rate_;
+  unsigned int audio_win_len_;
+  unsigned int audio_win_step_;
+
+  ModelState();
+  virtual ~ModelState();
+
+  virtual int init(const char* model_path,
+                   unsigned int n_features,
+                   unsigned int n_context,
+                   const char* alphabet_path,
+                   unsigned int beam_width);
+
+  virtual int initialize_state() = 0;
+
+  virtual void compute_mfcc(const std::vector<float>& audio_buffer, std::vector<float>& mfcc_output) = 0;
+
+  /**
+   * @brief Do a single inference step in the acoustic model, with:
+   *          input=mfcc
+   *          input_lengths=[n_frames]
+   *
+   * @param mfcc batch input data
+   * @param n_frames number of timesteps in the data
+   *
+   * @param[out] output_logits Where to store computed logits.
+   */
+  virtual void infer(const float* mfcc, unsigned int n_frames, std::vector<float>& logits_output) = 0;
+
+  /**
+   * @brief Perform decoding of the logits, using basic CTC decoder or
+   *        CTC decoder with KenLM enabled
+   *
+   * @param state Decoder state to use when decoding.
+   *
+   * @return Vector of Output structs directly from the CTC decoder for additional processing.
+   */
+  virtual std::vector<Output> decode_raw(DecoderState* state);
+
+  /**
+   * @brief Perform decoding of the logits, using basic CTC decoder or
+   *        CTC decoder with KenLM enabled
+   *
+   * @param state Decoder state to use when decoding.
+   *
+   * @return String representing the decoded text.
+   */
+  virtual char* decode(DecoderState* state);
+
+  /**
+   * @brief Return character-level metadata including letter timings.
+   *
+   * @param state Decoder state to use when decoding.
+   *
+   * @return Metadata struct containing MetadataItem structs for each character.
+   *         The user is responsible for freeing Metadata by calling DS_FreeMetadata().
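+   *
+   * Example (sketch): a caller can walk the returned items and then release
+   * them, e.g.
+   *
+   *   for (int i = 0; i < metadata->num_items; ++i) {
+   *     printf("%s @ %.2fs\n", metadata->items[i].character,
+   *            metadata->items[i].start_time);
+   *   }
+   *   DS_FreeMetadata(metadata);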
+ */ + virtual Metadata* decode_metadata(DecoderState* state); +}; + +#endif // MODELSTATE_H diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc new file mode 100644 index 0000000000..f1e9753905 --- /dev/null +++ b/native_client/tflitemodelstate.cc @@ -0,0 +1,258 @@ +#include "tflitemodelstate.h" + +using namespace tflite; +using std::vector; + +int +tflite_get_tensor_by_name(const Interpreter* interpreter, + const vector& list, + const char* name) +{ + int rv = -1; + + for (int i = 0; i < list.size(); ++i) { + const string& node_name = interpreter->tensor(list[i])->name; + if (node_name.compare(string(name)) == 0) { + rv = i; + } + } + + assert(rv >= 0); + return rv; +} + +int +tflite_get_input_tensor_by_name(const Interpreter* interpreter, const char* name) +{ + int idx = tflite_get_tensor_by_name(interpreter, interpreter->inputs(), name); + return interpreter->inputs()[idx]; +} + +int +tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* name) +{ + int idx = tflite_get_tensor_by_name(interpreter, interpreter->outputs(), name); + return interpreter->outputs()[idx]; +} + +void push_back_if_not_present(std::deque& list, int value) +{ + if (std::find(list.begin(), list.end(), value) == list.end()) { + list.push_back(value); + } +} + +// Backwards BFS on the node DAG. At each iteration we get the next tensor id +// from the frontier list, then for each node which has that tensor id as an +// output, add it to the parent list, and add its input tensors to the frontier +// list. Because we start from the final tensor and work backwards to the inputs, +// the parents list is constructed in reverse, adding elements to its front. +std::vector +tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id) +{ + std::deque parents; + std::deque frontier; + frontier.push_back(tensor_id); + while (!frontier.empty()) { + int next_tensor_id = frontier.front(); + frontier.pop_front(); + // Find all nodes that have next_tensor_id as an output + for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) { + TfLiteNode node = interpreter->node_and_registration(node_id)->first; + // Search node outputs for the tensor we're looking for + for (int i = 0; i < node.outputs->size; ++i) { + if (node.outputs->data[i] == next_tensor_id) { + // This node is part of the parent tree, add it to the parent list and + // add its input tensors to the frontier list + parents.push_front(node_id); + for (int j = 0; j < node.inputs->size; ++j) { + push_back_if_not_present(frontier, node.inputs->data[j]); + } + } + } + } + } + + return std::vector(parents.begin(), parents.end()); +} + +TFLiteModelState::TFLiteModelState() + : ModelState() + , interpreter_(nullptr) + , fbmodel_(nullptr) + , previous_state_size_(0) + , previous_state_c_(nullptr) + , previous_state_h_(nullptr) +{ +} + +int +TFLiteModelState::init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) +{ + int err = ModelState::init(model_path, n_features, n_context, alphabet_path, beam_width); + if (err != DS_ERR_OK) { + return err; + } + + fbmodel_ = tflite::FlatBufferModel::BuildFromFile(model_path); + if (!fbmodel_) { + std::cerr << "Error at reading model file " << model_path << std::endl; + return DS_ERR_FAIL_INIT_MMAP; + } + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder(*fbmodel_, resolver)(&interpreter_); + if (!interpreter_) { + std::cerr << "Error at InterpreterBuilder for 
model file " << model_path << std::endl; + return DS_ERR_FAIL_INTERPRETER; + } + + interpreter_->AllocateTensors(); + interpreter_->SetNumThreads(4); + + // Query all the index once + input_node_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_node"); + previous_state_c_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_c"); + previous_state_h_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_h"); + input_samples_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "input_samples"); + logits_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "logits"); + new_state_c_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_c"); + new_state_h_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_h"); + mfccs_idx_ = tflite_get_output_tensor_by_name(interpreter_.get(), "mfccs"); + + // When we call Interpreter::Invoke, the whole graph is executed by default, + // which means every time compute_mfcc is called the entire acoustic model is + // also executed. To workaround that problem, we walk up the dependency DAG + // from the mfccs output tensor to find all the relevant nodes required for + // feature computation, building an execution plan that runs just those nodes. + auto mfcc_plan = tflite_find_parent_node_ids(interpreter_.get(), mfccs_idx_); + auto orig_plan = interpreter_->execution_plan(); + + // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan + auto erase_begin = std::remove_if(orig_plan.begin(), orig_plan.end(), [&mfcc_plan](int elem) { + return std::find(mfcc_plan.begin(), mfcc_plan.end(), elem) != mfcc_plan.end(); + }); + orig_plan.erase(erase_begin, orig_plan.end()); + + acoustic_exec_plan_ = std::move(orig_plan); + mfcc_exec_plan_ = std::move(mfcc_plan); + + TfLiteIntArray* dims_input_node = interpreter_->tensor(input_node_idx_)->dims; + + n_steps_ = dims_input_node->data[1]; + n_context_ = (dims_input_node->data[2] - 1) / 2; + n_features_ = dims_input_node->data[3]; + mfcc_feats_per_timestep_ = dims_input_node->data[2] * dims_input_node->data[3]; + + TfLiteIntArray* dims_logits = interpreter_->tensor(logits_idx_)->dims; + const int final_dim_size = dims_logits->data[1] - 1; + if (final_dim_size != alphabet_->GetSize()) { + std::cerr << "Error: Alphabet size does not match loaded model: alphabet " + << "has size " << alphabet_->GetSize() + << ", but model has " << final_dim_size + << " classes in its output. Make sure you're passing an alphabet " + << "file with the same size as the one used for training." 
+ << std::endl; + return DS_ERR_INVALID_ALPHABET; + } + + TfLiteIntArray* dims_c = interpreter_->tensor(previous_state_c_idx_)->dims; + TfLiteIntArray* dims_h = interpreter_->tensor(previous_state_h_idx_)->dims; + assert(dims_c->data[1] == dims_h->data[1]); + + previous_state_size_ = dims_c->data[1]; + previous_state_c_.reset(new float[previous_state_size_]()); + previous_state_h_.reset(new float[previous_state_size_]()); + + // Set initial values for previous_state_c and previous_state_h + memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); + memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); + + return DS_ERR_OK; +} + +int +TFLiteModelState::initialize_state() +{ + /* Ensure previous_state_{c,h} are not holding previous stream value */ + memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_); + memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_); + + return DS_ERR_OK; +} + +void +TFLiteModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +{ + const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank + + // Feeding input_node + float* input_node = interpreter_->typed_tensor(input_node_idx_); + { + int i; + for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { + input_node[i] = aMfcc[i]; + } + for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { + input_node[i] = 0; + } + } + + assert(previous_state_size_ > 0); + + // Feeding previous_state_c, previous_state_h + memcpy(interpreter_->typed_tensor(previous_state_c_idx_), previous_state_c_.get(), sizeof(float) * previous_state_size_); + memcpy(interpreter_->typed_tensor(previous_state_h_idx_), previous_state_h_.get(), sizeof(float) * previous_state_size_); + + interpreter_->SetExecutionPlan(acoustic_exec_plan_); + TfLiteStatus status = interpreter_->Invoke(); + if (status != kTfLiteOk) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + float* outputs = interpreter_->typed_tensor(logits_idx_); + + // The CTCDecoder works with log-probs. 
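+  // Only the n_frames timesteps actually fed are copied out; the remainder of
+  // the output tensor corresponds to zero-padded input and is dropped.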
+ for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { + logits_output.push_back(outputs[t]); + } + + memcpy(previous_state_c_.get(), interpreter_->typed_tensor(new_state_c_idx_), sizeof(float) * previous_state_size_); + memcpy(previous_state_h_.get(), interpreter_->typed_tensor(new_state_h_idx_), sizeof(float) * previous_state_size_); +} + +void +TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc_output) +{ + // Feeding input_node + float* input_samples = interpreter_->typed_tensor(input_samples_idx_); + for (int i = 0; i < samples.size(); ++i) { + input_samples[i] = samples[i]; + } + + interpreter_->SetExecutionPlan(mfcc_exec_plan_); + TfLiteStatus status = interpreter_->Invoke(); + if (status != kTfLiteOk) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + // The feature computation graph is hardcoded to one audio length for now + int n_windows = 1; + TfLiteIntArray* out_dims = interpreter_->tensor(mfccs_idx_)->dims; + int num_elements = 1; + for (int i = 0; i < out_dims->size; ++i) { + num_elements *= out_dims->data[i]; + } + assert(num_elements / n_features_ == n_windows); + + float* outputs = interpreter_->typed_tensor(mfccs_idx_); + for (int i = 0; i < n_windows * n_features_; ++i) { + mfcc_output.push_back(outputs[i]); + } +} diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h new file mode 100644 index 0000000000..de02074dbe --- /dev/null +++ b/native_client/tflitemodelstate.h @@ -0,0 +1,51 @@ +#ifndef TFLITEMODELSTATE_H +#define TFLITEMODELSTATE_H + +#include +#include + +#include "tensorflow/lite/model.h" +#include "tensorflow/lite/kernels/register.h" + +#include "modelstate.h" + +struct TFLiteModelState : public ModelState +{ + std::unique_ptr interpreter_; + std::unique_ptr fbmodel_; + + size_t previous_state_size_; + std::unique_ptr previous_state_c_; + std::unique_ptr previous_state_h_; + + int input_node_idx_; + int previous_state_c_idx_; + int previous_state_h_idx_; + int input_samples_idx_; + + int logits_idx_; + int new_state_c_idx_; + int new_state_h_idx_; + int mfccs_idx_; + + std::vector acoustic_exec_plan_; + std::vector mfcc_exec_plan_; + + TFLiteModelState(); + + virtual int init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) override; + + virtual int initialize_state() override; + + virtual void compute_mfcc(const std::vector& audio_buffer, + std::vector& mfcc_output) override; + + virtual void infer(const float* mfcc, unsigned int n_frames, + std::vector& logits_output) override; +}; + +#endif // TFLITEMODELSTATE_H diff --git a/native_client/tfmodelstate.cc b/native_client/tfmodelstate.cc new file mode 100644 index 0000000000..866775e4d2 --- /dev/null +++ b/native_client/tfmodelstate.cc @@ -0,0 +1,214 @@ +#include "tfmodelstate.h" + +#include "ds_graph_version.h" + +using namespace tensorflow; +using std::vector; + +TFModelState::TFModelState() + : ModelState() + , mmap_env_(nullptr) + , session_(nullptr) +{ +} + +TFModelState::~TFModelState() +{ + if (session_) { + Status status = session_->Close(); + if (!status.ok()) { + std::cerr << "Error closing TensorFlow session: " << status << std::endl; + } + } + delete mmap_env_; +} + +int +TFModelState::init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) +{ + int err = ModelState::init(model_path, n_features, n_context, alphabet_path, beam_width); + if (err != 
DS_ERR_OK) { + return err; + } + + Status status; + SessionOptions options; + + mmap_env_ = new MemmappedEnv(Env::Default()); + + bool is_mmap = std::string(model_path).find(".pbmm") != std::string::npos; + if (!is_mmap) { + std::cerr << "Warning: reading entire model file into memory. Transform model file into an mmapped graph to reduce heap usage." << std::endl; + } else { + status = mmap_env_->InitializeFromFile(model_path); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_INIT_MMAP; + } + + options.config.mutable_graph_options() + ->mutable_optimizer_options() + ->set_opt_level(::OptimizerOptions::L0); + options.env = mmap_env_; + } + + status = NewSession(options, &session_); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_INIT_SESS; + } + + if (is_mmap) { + status = ReadBinaryProto(mmap_env_, + MemmappedFileSystem::kMemmappedPackageDefaultGraphDef, + &graph_def_); + } else { + status = ReadBinaryProto(Env::Default(), model_path, &graph_def_); + } + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_READ_PROTOBUF; + } + + status = session_->Create(graph_def_); + if (!status.ok()) { + std::cerr << status << std::endl; + return DS_ERR_FAIL_CREATE_SESS; + } + + int graph_version = graph_def_.version(); + if (graph_version < DS_GRAPH_VERSION) { + std::cerr << "Specified model file version (" << graph_version << ") is " + << "incompatible with minimum version supported by this client (" + << DS_GRAPH_VERSION << "). See " + << "https://github.com/mozilla/DeepSpeech/#model-compatibility " + << "for more information" << std::endl; + return DS_ERR_MODEL_INCOMPATIBLE; + } + + for (int i = 0; i < graph_def_.node_size(); ++i) { + NodeDef node = graph_def_.node(i); + if (node.name() == "input_node") { + const auto& shape = node.attr().at("shape").shape(); + n_steps_ = shape.dim(1).size(); + n_context_ = (shape.dim(2).size()-1)/2; + n_features_ = shape.dim(3).size(); + mfcc_feats_per_timestep_ = shape.dim(2).size() * shape.dim(3).size(); + } else if (node.name() == "logits_shape") { + Tensor logits_shape = Tensor(DT_INT32, TensorShape({3})); + if (!logits_shape.FromProto(node.attr().at("value").tensor())) { + continue; + } + + int final_dim_size = logits_shape.vec()(2) - 1; + if (final_dim_size != alphabet_->GetSize()) { + std::cerr << "Error: Alphabet size does not match loaded model: alphabet " + << "has size " << alphabet_->GetSize() + << ", but model has " << final_dim_size + << " classes in its output. Make sure you're passing an alphabet " + << "file with the same size as the one used for training." + << std::endl; + return DS_ERR_INVALID_ALPHABET; + } + } else if (node.name() == "model_metadata") { + sample_rate_ = node.attr().at("sample_rate").i(); + int win_len_ms = node.attr().at("feature_win_len").i(); + int win_step_ms = node.attr().at("feature_win_step").i(); + audio_win_len_ = sample_rate_ * (win_len_ms / 1000.0); + audio_win_step_ = sample_rate_ * (win_step_ms / 1000.0); + } + } + + if (n_context_ == -1 || n_features_ == -1) { + std::cerr << "Error: Could not infer input shape from model file. " + << "Make sure input_node is a 4D tensor with shape " + << "[batch_size=1, time, window_size, n_features]." 
+ << std::endl; + return DS_ERR_INVALID_SHAPE; + } + + return DS_ERR_OK; +} + +int +TFModelState::initialize_state() +{ + Status status = session_->Run({}, {}, {"initialize_state"}, nullptr); + if (!status.ok()) { + std::cerr << "Error running session: " << status << std::endl; + return DS_ERR_FAIL_RUN_SESS; + } + + return DS_ERR_OK; +} + +void +TFModelState::infer(const float* aMfcc, unsigned int n_frames, vector& logits_output) +{ + const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank + + Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_})); + + auto input_mapped = input.flat(); + int i; + for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) { + input_mapped(i) = aMfcc[i]; + } + for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) { + input_mapped(i) = 0.; + } + + Tensor input_lengths(DT_INT32, TensorShape({1})); + input_lengths.scalar()() = n_frames; + + vector outputs; + Status status = session_->Run( + {{"input_node", input}, {"input_lengths", input_lengths}}, + {"logits"}, {}, &outputs); + + if (!status.ok()) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + auto logits_mapped = outputs[0].flat(); + // The CTCDecoder works with log-probs. + for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) { + logits_output.push_back(logits_mapped(t)); + } +} + +void +TFModelState::compute_mfcc(const vector& samples, vector& mfcc_output) +{ + Tensor input(DT_FLOAT, TensorShape({audio_win_len_})); + auto input_mapped = input.flat(); + int i; + for (i = 0; i < samples.size(); ++i) { + input_mapped(i) = samples[i]; + } + for (; i < audio_win_len_; ++i) { + input_mapped(i) = 0.f; + } + + vector outputs; + Status status = session_->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs); + + if (!status.ok()) { + std::cerr << "Error running session: " << status << "\n"; + return; + } + + // The feature computation graph is hardcoded to one audio length for now + const int n_windows = 1; + assert(outputs[0].shape().num_elements() / n_features_ == n_windows); + + auto mfcc_mapped = outputs[0].flat(); + for (int i = 0; i < n_windows * n_features_; ++i) { + mfcc_output.push_back(mfcc_mapped(i)); + } +} diff --git a/native_client/tfmodelstate.h b/native_client/tfmodelstate.h new file mode 100644 index 0000000000..c3dc770855 --- /dev/null +++ b/native_client/tfmodelstate.h @@ -0,0 +1,37 @@ +#ifndef TFMODELSTATE_H +#define TFMODELSTATE_H + +#include + +#include "tensorflow/core/public/session.h" +#include "tensorflow/core/platform/env.h" +#include "tensorflow/core/util/memmapped_file_system.h" + +#include "modelstate.h" + +struct TFModelState : public ModelState +{ + tensorflow::MemmappedEnv* mmap_env_; + tensorflow::Session* session_; + tensorflow::GraphDef graph_def_; + + TFModelState(); + virtual ~TFModelState(); + + virtual int init(const char* model_path, + unsigned int n_features, + unsigned int n_context, + const char* alphabet_path, + unsigned int beam_width) override; + + virtual int initialize_state() override; + + virtual void infer(const float* mfcc, + unsigned int n_frames, + std::vector& logits_output) override; + + virtual void compute_mfcc(const std::vector& audio_buffer, + std::vector& mfcc_output) override; +}; + +#endif // TFMODELSTATE_H From 6e78bac799362516f029708c581e07de2c8fcf08 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Tue, 4 Jun 2019 18:11:37 -0300 Subject: [PATCH 2/6] Address review comments --- native_client/deepspeech.cc | 8 +++++--- native_client/tflitemodelstate.cc | 16 
+++++++++++++--- native_client/tflitemodelstate.h | 1 + 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc index 9955cf861c..1ee22d5864 100644 --- a/native_client/deepspeech.cc +++ b/native_client/deepspeech.cc @@ -271,11 +271,13 @@ DS_CreateModel(const char* aModelPath, return DS_ERR_NO_MODEL; } + std::unique_ptr model( #ifndef USE_TFLITE - std::unique_ptr model(new TFModelState()); + new TFModelState() #else - std::unique_ptr model(new TFLiteModelState()); -#endif // USE_TFLITE + new TFLiteModelState() +#endif + ); if (!model) { std::cerr << "Could not allocate model state." << std::endl; diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc index f1e9753905..92d9c01472 100644 --- a/native_client/tflitemodelstate.cc +++ b/native_client/tflitemodelstate.cc @@ -35,7 +35,8 @@ tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* nam return interpreter->outputs()[idx]; } -void push_back_if_not_present(std::deque& list, int value) +void +push_back_if_not_present(std::deque& list, int value) { if (std::find(list.begin(), list.end(), value) == list.end()) { list.push_back(value); @@ -86,6 +87,10 @@ TFLiteModelState::TFLiteModelState() { } +TFLiteModelState::~TFLiteModelState() +{ +} + int TFLiteModelState::init(const char* model_path, unsigned int n_features, @@ -235,8 +240,13 @@ TFLiteModelState::compute_mfcc(const vector& samples, vector& mfcc input_samples[i] = samples[i]; } - interpreter_->SetExecutionPlan(mfcc_exec_plan_); - TfLiteStatus status = interpreter_->Invoke(); + TfLiteStatus status = interpreter_->SetExecutionPlan(mfcc_exec_plan_); + if (status != kTfLiteOk) { + std::cerr << "Error setting execution plan: " << status << "\n"; + return; + } + + status = interpreter_->Invoke(); if (status != kTfLiteOk) { std::cerr << "Error running session: " << status << "\n"; return; diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h index de02074dbe..ee5bfb6a91 100644 --- a/native_client/tflitemodelstate.h +++ b/native_client/tflitemodelstate.h @@ -32,6 +32,7 @@ struct TFLiteModelState : public ModelState std::vector mfcc_exec_plan_; TFLiteModelState(); + virtual ~TFLiteModelState(); virtual int init(const char* model_path, unsigned int n_features, From e51b9d987d162bd4cbad0a0c94295ae7809ea086 Mon Sep 17 00:00:00 2001 From: Reuben Morais Date: Thu, 6 Jun 2019 16:40:19 -0300 Subject: [PATCH 3/6] Remove previous state model variable, track by hand in StreamingState instead --- DeepSpeech.py | 92 ++++++++----------- GRAPH_VERSION | 2 +- native_client/BUILD | 14 +-- native_client/deepspeech.cc | 18 ++-- native_client/modelstate.cc | 1 + native_client/modelstate.h | 11 ++- native_client/tflitemodelstate.cc | 142 +++++++++++++++--------------- native_client/tflitemodelstate.h | 27 ++++-- native_client/tfmodelstate.cc | 94 ++++++++++++-------- native_client/tfmodelstate.h | 10 ++- 10 files changed, 211 insertions(+), 200 deletions(-) diff --git a/DeepSpeech.py b/DeepSpeech.py index 1883724de8..7e92e2023e 100755 --- a/DeepSpeech.py +++ b/DeepSpeech.py @@ -574,12 +574,8 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False): # no state management since n_step is expected to be dynamic too (see below) previous_state = previous_state_c = previous_state_h = None else: - if tflite: - previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c') - previous_state_h = tf.placeholder(tf.float32, 
From e51b9d987d162bd4cbad0a0c94295ae7809ea086 Mon Sep 17 00:00:00 2001
From: Reuben Morais
Date: Thu, 6 Jun 2019 16:40:19 -0300
Subject: [PATCH 3/6] Remove previous state model variable, track by hand in
 StreamingState instead

---
 DeepSpeech.py                     |  92 ++++++++----------
 GRAPH_VERSION                     |   2 +-
 native_client/BUILD               |  14 +--
 native_client/deepspeech.cc       |  18 ++--
 native_client/modelstate.cc       |   1 +
 native_client/modelstate.h        |  11 ++-
 native_client/tflitemodelstate.cc | 142 +++++++++++++++---------------
 native_client/tflitemodelstate.h  |  27 ++++--
 native_client/tfmodelstate.cc     |  94 ++++++++++++--------
 native_client/tfmodelstate.h      |  10 ++-
 10 files changed, 211 insertions(+), 200 deletions(-)

diff --git a/DeepSpeech.py b/DeepSpeech.py
index 1883724de8..7e92e2023e 100755
--- a/DeepSpeech.py
+++ b/DeepSpeech.py
@@ -574,12 +574,8 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
         # no state management since n_step is expected to be dynamic too (see below)
         previous_state = previous_state_c = previous_state_h = None
     else:
-        if tflite:
-            previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c')
-            previous_state_h = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h')
-        else:
-            previous_state_c = variable_on_cpu('previous_state_c', [batch_size, Config.n_cell_dim], initializer=None)
-            previous_state_h = variable_on_cpu('previous_state_h', [batch_size, Config.n_cell_dim], initializer=None)
+        previous_state_c = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_c')
+        previous_state_h = tf.placeholder(tf.float32, [batch_size, Config.n_cell_dim], name='previous_state_h')
 
         previous_state = tf.contrib.rnn.LSTMStateTuple(previous_state_c, previous_state_h)
 
@@ -605,7 +601,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
         logits = tf.squeeze(logits, [1])
 
     # Apply softmax for CTC decoder
-    logits = tf.nn.softmax(logits)
+    logits = tf.nn.softmax(logits, name='logits')
 
     if batch_size <= 0:
         if tflite:
@@ -618,51 +614,31 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
                 'input_lengths': seq_length,
             },
             {
-                'outputs': tf.identity(logits, name='logits'),
+                'outputs': logits,
             },
             layers
         )
 
     new_state_c, new_state_h = layers['rnn_output_state']
-    if tflite:
-        logits = tf.identity(logits, name='logits')
-        new_state_c = tf.identity(new_state_c, name='new_state_c')
-        new_state_h = tf.identity(new_state_h, name='new_state_h')
-
-        inputs = {
-            'input': input_tensor,
-            'previous_state_c': previous_state_c,
-            'previous_state_h': previous_state_h,
-            'input_samples': input_samples,
-        }
-
-        if FLAGS.use_seq_length:
-            inputs.update({'input_lengths': seq_length})
-
-        outputs = {
-            'outputs': logits,
-            'new_state_c': new_state_c,
-            'new_state_h': new_state_h,
-            'mfccs': mfccs,
-        }
-    else:
-        zero_state = tf.zeros([batch_size, Config.n_cell_dim], tf.float32)
-        initialize_c = tf.assign(previous_state_c, zero_state)
-        initialize_h = tf.assign(previous_state_h, zero_state)
-        initialize_state = tf.group(initialize_c, initialize_h, name='initialize_state')
-        with tf.control_dependencies([tf.assign(previous_state_c, new_state_c), tf.assign(previous_state_h, new_state_h)]):
-            logits = tf.identity(logits, name='logits')
-
-        inputs = {
-            'input': input_tensor,
-            'input_lengths': seq_length,
-            'input_samples': input_samples,
-        }
-        outputs = {
-            'outputs': logits,
-            'initialize_state': initialize_state,
-            'mfccs': mfccs,
-        }
+    new_state_c = tf.identity(new_state_c, name='new_state_c')
+    new_state_h = tf.identity(new_state_h, name='new_state_h')
+
+    inputs = {
+        'input': input_tensor,
+        'previous_state_c': previous_state_c,
+        'previous_state_h': previous_state_h,
+        'input_samples': input_samples,
+    }
+
+    if FLAGS.use_seq_length:
+        inputs.update({'input_lengths': seq_length})
+
+    outputs = {
+        'outputs': logits,
+        'new_state_c': new_state_c,
+        'new_state_h': new_state_h,
+        'mfccs': mfccs,
+    }
 
     return inputs, outputs, layers
 
@@ -682,10 +658,12 @@ def export():
     output_names_ops = [op.name for op in outputs.values() if isinstance(op, Operation)]
     output_names = ",".join(output_names_tensors + output_names_ops)
 
-    if not FLAGS.export_tflite:
-        mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
-    else:
+    mapping = None
+    if FLAGS.export_tflite:
         # Create a saver using variables from the above newly created graph
+        # Training graph uses LSTMFusedCell, but the TFLite inference graph uses
+        # a static RNN with a normal cell, so we need to rewrite the names to
+        # match the training weights when restoring.
         def fixup(name):
             if name.startswith('rnn/lstm_cell/'):
                 return name.replace('rnn/lstm_cell/', 'lstm_fused_cell/')
@@ -710,7 +688,7 @@ def fixup(name):
     if not os.path.isdir(FLAGS.export_dir):
         os.makedirs(FLAGS.export_dir)
 
-    def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=None):
+    def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklist=''):
         frozen = freeze_graph.freeze_graph_with_def_protos(
             input_graph_def=tf.get_default_graph().as_graph_def(),
             input_saver_def=saver.as_saver_def(),
@@ -731,7 +709,7 @@ def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklis
             placeholder_type_enum=tf.float32.as_datatype_enum)
 
     if not FLAGS.export_tflite:
-        frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='previous_state_c,previous_state_h')
+        frozen_graph = do_graph_freeze(output_node_names=output_names)
         frozen_graph.version = int(file_relative_read('GRAPH_VERSION').strip())
 
         # Add a no-op node to the graph with metadata information to be loaded by the native client
@@ -747,7 +725,7 @@ def do_graph_freeze(output_file=None, output_node_names=None, variables_blacklis
         with open(output_graph_path, 'wb') as fout:
             fout.write(frozen_graph.SerializeToString())
     else:
-        frozen_graph = do_graph_freeze(output_node_names=output_names, variables_blacklist='')
+        frozen_graph = do_graph_freeze(output_node_names=output_names)
         output_tflite_path = os.path.join(FLAGS.export_dir, output_filename.replace('.pb', '.tflite'))
 
         converter = tf.lite.TFLiteConverter(frozen_graph, input_tensors=inputs.values(), output_tensors=outputs.values())
@@ -771,8 +749,7 @@ def do_single_file_inference(input_file_path):
         inputs, outputs, _ = create_inference_graph(batch_size=1, n_steps=-1)
 
         # Create a saver using variables from the above newly created graph
-        mapping = {v.op.name: v for v in tf.global_variables() if not v.op.name.startswith('previous_state_')}
-        saver = tf.train.Saver(mapping)
+        saver = tf.train.Saver()
 
         # Restore variables from training checkpoint
         # TODO: This restores the most recent checkpoint, but if we use validation to counteract
@@ -784,9 +761,10 @@ def do_single_file_inference(input_file_path):
         checkpoint_path = checkpoint.model_checkpoint_path
         saver.restore(session, checkpoint_path)
 
-        session.run(outputs['initialize_state'])
-
         features, features_len = audiofile_to_features(input_file_path)
+        previous_state_c = np.zeros([1, Config.n_cell_dim])
+        previous_state_h = np.zeros([1, Config.n_cell_dim])
 
         # Add batch dimension
         features = tf.expand_dims(features, 0)
@@ -799,6 +777,8 @@ def do_single_file_inference(input_file_path):
         logits = outputs['outputs'].eval(feed_dict={
             inputs['input']: features,
             inputs['input_lengths']: features_len,
+            inputs['previous_state_c']: previous_state_c,
+            inputs['previous_state_h']: previous_state_h,
         }, session=session)
 
         logits = np.squeeze(logits)
 
diff --git a/GRAPH_VERSION b/GRAPH_VERSION
index 56a6051ca2..d8263ee986 100644
--- a/GRAPH_VERSION
+++ b/GRAPH_VERSION
@@ -1 +1 @@
-1
\ No newline at end of file
+2
\ No newline at end of file
diff --git a/native_client/BUILD b/native_client/BUILD
index d7813d297f..5203eb4735 100644
--- a/native_client/BUILD
+++ b/native_client/BUILD
@@ -114,34 +114,26 @@ tf_cc_shared_object(
         ### => Trying to be more fine-grained
         ### Use bin/ops_in_graph.py to list all the ops used by a frozen graph.
         ### CPU only build, libdeepspeech.so file size reduced by ~50%
-        "//tensorflow/core/kernels:dense_update_ops",      # Assign
-        "//tensorflow/core/kernels:constant_op",           # Const
-        "//tensorflow/core/kernels:immutable_constant_op", # ImmutableConst
+        "//tensorflow/core/kernels:dense_update_ops",      # Assign (remove once prod model no longer depends on it)
+        "//tensorflow/core/kernels:constant_op",           # Placeholder
+        "//tensorflow/core/kernels:immutable_constant_op", # ImmutableConst (used in memmapped models)
         "//tensorflow/core/kernels:identity_op",           # Identity
         "//tensorflow/core/kernels:softmax_op",            # Softmax
         "//tensorflow/core/kernels:transpose_op",          # Transpose
         "//tensorflow/core/kernels:reshape_op",            # Reshape
         "//tensorflow/core/kernels:shape_ops",             # Shape
         "//tensorflow/core/kernels:concat_op",             # ConcatV2
-        "//tensorflow/core/kernels:split_op",              # Split
-        "//tensorflow/core/kernels:variable_ops",          # VariableV2
         "//tensorflow/core/kernels:relu_op",               # Relu
         "//tensorflow/core/kernels:bias_op",               # BiasAdd
         "//tensorflow/core/kernels:math",                  # Range, MatMul
-        "//tensorflow/core/kernels:control_flow_ops",      # Enter
         "//tensorflow/core/kernels:tile_ops",              # Tile
-        "//tensorflow/core/kernels:gather_op",             # Gather
         "//tensorflow/core/kernels:mfcc_op",               # Mfcc
         "//tensorflow/core/kernels:spectrogram_op",        # AudioSpectrogram
         "//tensorflow/core/kernels:strided_slice_op",      # StridedSlice
         "//tensorflow/core/kernels:slice_op",              # Slice, needed by StridedSlice
         "//tensorflow/contrib/rnn:lstm_ops_kernels",       # BlockLSTM
-        "//tensorflow/core/kernels:random_ops",            # RandomGammaGrad
         "//tensorflow/core/kernels:pack_op",               # Pack
         "//tensorflow/core/kernels:gather_nd_op",          # GatherNd
-        #### Needed by production model produced without "--use_seq_length False"
-        #"//tensorflow/core/kernels:logging_ops",          # Assert
-        #"//tensorflow/core/kernels:reverse_sequence_op",  # ReverseSequence
     ],
 }) + if_cuda([
     "//tensorflow/core:core",
diff --git a/native_client/deepspeech.cc b/native_client/deepspeech.cc
index 1ee22d5864..7dd9657428 100644
--- a/native_client/deepspeech.cc
+++ b/native_client/deepspeech.cc
@@ -67,6 +67,9 @@ struct StreamingState {
   vector<float> audio_buffer_;
   vector<float> mfcc_buffer_;
   vector<float> batch_buffer_;
+  vector<float> previous_state_c_;
+  vector<float> previous_state_h_;
+
   ModelState* model_;
   std::unique_ptr<DecoderState> decoder_state_;
 
@@ -233,7 +236,13 @@ void
 StreamingState::processBatch(const vector<float>& buf, unsigned int n_steps)
 {
   vector<float> logits;
-  model_->infer(buf.data(), n_steps, logits);
+  model_->infer(buf,
+                n_steps,
+                previous_state_c_,
+                previous_state_h_,
+                logits,
+                previous_state_c_,
+                previous_state_h_);
 
   const int cutoff_top_n = 40;
   const double cutoff_prob = 1.0;
@@ -326,11 +335,6 @@ DS_SetupStream(ModelState* aCtx,
 {
   *retval = nullptr;
 
-  int err = aCtx->initialize_state();
-  if (err != DS_ERR_OK) {
-    return err;
-  }
-
   std::unique_ptr<StreamingState> ctx(new StreamingState());
   if (!ctx) {
     std::cerr << "Could not allocate streaming state." << std::endl;
@@ -348,6 +352,8 @@ DS_SetupStream(ModelState* aCtx,
   ctx->mfcc_buffer_.reserve(aCtx->mfcc_feats_per_timestep_);
   ctx->mfcc_buffer_.resize(aCtx->n_features_*aCtx->n_context_, 0.f);
   ctx->batch_buffer_.reserve(aCtx->n_steps_ * aCtx->mfcc_feats_per_timestep_);
+  ctx->previous_state_c_.resize(aCtx->state_size_, 0.f);
+  ctx->previous_state_h_.resize(aCtx->state_size_, 0.f);
 
   ctx->model_ = aCtx;
   ctx->decoder_state_.reset(decoder_init(*aCtx->alphabet_, num_classes, aCtx->scorer_));
diff --git a/native_client/modelstate.cc b/native_client/modelstate.cc
index c3fda2b938..7bb7f073ce 100644
--- a/native_client/modelstate.cc
+++ b/native_client/modelstate.cc
@@ -17,6 +17,7 @@ ModelState::ModelState()
   , sample_rate_(DEFAULT_SAMPLE_RATE)
   , audio_win_len_(DEFAULT_WINDOW_LENGTH)
   , audio_win_step_(DEFAULT_WINDOW_STEP)
+  , state_size_(-1)
 {
 }
 
diff --git a/native_client/modelstate.h b/native_client/modelstate.h
index 7f53c63e62..7179942186 100644
--- a/native_client/modelstate.h
+++ b/native_client/modelstate.h
@@ -28,6 +28,7 @@ struct ModelState {
   unsigned int sample_rate_;
   unsigned int audio_win_len_;
   unsigned int audio_win_step_;
+  unsigned int state_size_;
 
   ModelState();
   virtual ~ModelState();
@@ -38,8 +39,6 @@ struct ModelState {
                    const char* alphabet_path,
                    unsigned int beam_width);
 
-  virtual int initialize_state() = 0;
-
   virtual void compute_mfcc(const std::vector<float>& audio_buffer, std::vector<float>& mfcc_output) = 0;
 
   /**
    * @brief Do a single inference step in the acoustic model, with:
    *        input=mfcc
    *        input_lengths=[n_frames]
    *
    * @param mfcc batch input data
    * @param n_frames number of timesteps in the data
    *
    * @param[out] output_logits Where to store computed logits.
    */
-  virtual void infer(const float* mfcc, unsigned int n_frames, std::vector<float>& logits_output) = 0;
+  virtual void infer(const std::vector<float>& mfcc,
+                     unsigned int n_frames,
+                     const std::vector<float>& previous_state_c,
+                     const std::vector<float>& previous_state_h,
+                     std::vector<float>& logits_output,
+                     std::vector<float>& state_c_output,
+                     std::vector<float>& state_h_output) = 0;
 
   /**
    * @brief Perform decoding of the logits, using basic CTC decoder or
diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc
index 92d9c01472..9af0ae861a 100644
--- a/native_client/tflitemodelstate.cc
+++ b/native_client/tflitemodelstate.cc
@@ -4,14 +4,13 @@
 using namespace tflite;
 using std::vector;
 
 int
-tflite_get_tensor_by_name(const Interpreter* interpreter,
-                          const vector<int>& list,
-                          const char* name)
+TFLiteModelState::get_tensor_by_name(const vector<int>& list,
+                                     const char* name)
 {
   int rv = -1;
 
   for (int i = 0; i < list.size(); ++i) {
-    const string& node_name = interpreter->tensor(list[i])->name;
+    const string& node_name = interpreter_->tensor(list[i])->name;
     if (node_name.compare(string(name)) == 0) {
       rv = i;
     }
@@ -22,17 +21,17 @@ tflite_get_tensor_by_name(const Interpreter* interpreter,
 }
 
 int
-tflite_get_input_tensor_by_name(const Interpreter* interpreter, const char* name)
+TFLiteModelState::get_input_tensor_by_name(const char* name)
 {
-  int idx = tflite_get_tensor_by_name(interpreter, interpreter->inputs(), name);
-  return interpreter->inputs()[idx];
+  int idx = get_tensor_by_name(interpreter_->inputs(), name);
+  return interpreter_->inputs()[idx];
 }
 
 int
-tflite_get_output_tensor_by_name(const Interpreter* interpreter, const char* name)
+TFLiteModelState::get_output_tensor_by_name(const char* name)
 {
-  int idx = tflite_get_tensor_by_name(interpreter, interpreter->outputs(), name);
-  return interpreter->outputs()[idx];
+  int idx = get_tensor_by_name(interpreter_->outputs(), name);
+  return interpreter_->outputs()[idx];
 }
 
 void
@@ -48,8 +47,8 @@ push_back_if_not_present(std::deque<int>& list, int value)
 // output, add it to the parent list, and add its input tensors to the frontier
 // list. Because we start from the final tensor and work backwards to the inputs,
 // the parents list is constructed in reverse, adding elements to its front.
-std::vector<int>
-tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id)
+vector<int>
+TFLiteModelState::find_parent_node_ids(int tensor_id)
 {
   std::deque<int> parents;
   std::deque<int> frontier;
@@ -58,8 +57,8 @@ tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id)
     int next_tensor_id = frontier.front();
     frontier.pop_front();
     // Find all nodes that have next_tensor_id as an output
-    for (int node_id = 0; node_id < interpreter->nodes_size(); ++node_id) {
-      TfLiteNode node = interpreter->node_and_registration(node_id)->first;
+    for (int node_id = 0; node_id < interpreter_->nodes_size(); ++node_id) {
+      TfLiteNode node = interpreter_->node_and_registration(node_id)->first;
       // Search node outputs for the tensor we're looking for
       for (int i = 0; i < node.outputs->size; ++i) {
         if (node.outputs->data[i] == next_tensor_id) {
@@ -74,16 +73,13 @@ tflite_find_parent_node_ids(Interpreter* interpreter, int tensor_id)
     }
   }
 
-  return std::vector<int>(parents.begin(), parents.end());
+  return vector<int>(parents.begin(), parents.end());
 }
 
 TFLiteModelState::TFLiteModelState()
   : ModelState()
   , interpreter_(nullptr)
   , fbmodel_(nullptr)
-  , previous_state_size_(0)
-  , previous_state_c_(nullptr)
-  , previous_state_h_(nullptr)
 {
 }
 
@@ -120,21 +116,21 @@ TFLiteModelState::init(const char* model_path,
   interpreter_->SetNumThreads(4);
 
   // Query all the index once
-  input_node_idx_       = tflite_get_input_tensor_by_name(interpreter_.get(), "input_node");
-  previous_state_c_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_c");
-  previous_state_h_idx_ = tflite_get_input_tensor_by_name(interpreter_.get(), "previous_state_h");
-  input_samples_idx_    = tflite_get_input_tensor_by_name(interpreter_.get(), "input_samples");
-  logits_idx_           = tflite_get_output_tensor_by_name(interpreter_.get(), "logits");
-  new_state_c_idx_      = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_c");
-  new_state_h_idx_      = tflite_get_output_tensor_by_name(interpreter_.get(), "new_state_h");
-  mfccs_idx_            = tflite_get_output_tensor_by_name(interpreter_.get(), "mfccs");
+  input_node_idx_       = get_input_tensor_by_name("input_node");
+  previous_state_c_idx_ = get_input_tensor_by_name("previous_state_c");
+  previous_state_h_idx_ = get_input_tensor_by_name("previous_state_h");
+  input_samples_idx_    = get_input_tensor_by_name("input_samples");
+  logits_idx_           = get_output_tensor_by_name("logits");
+  new_state_c_idx_      = get_output_tensor_by_name("new_state_c");
+  new_state_h_idx_      = get_output_tensor_by_name("new_state_h");
+  mfccs_idx_            = get_output_tensor_by_name("mfccs");
 
   // When we call Interpreter::Invoke, the whole graph is executed by default,
   // which means every time compute_mfcc is called the entire acoustic model is
   // also executed. To workaround that problem, we walk up the dependency DAG
   // from the mfccs output tensor to find all the relevant nodes required for
   // feature computation, building an execution plan that runs just those nodes.
-  auto mfcc_plan = tflite_find_parent_node_ids(interpreter_.get(), mfccs_idx_);
+  auto mfcc_plan = find_parent_node_ids(mfccs_idx_);
   auto orig_plan = interpreter_->execution_plan();
 
   // Remove MFCC nodes from original plan (all nodes) to create the acoustic model plan
@@ -168,50 +164,57 @@ TFLiteModelState::init(const char* model_path,
   TfLiteIntArray* dims_c = interpreter_->tensor(previous_state_c_idx_)->dims;
   TfLiteIntArray* dims_h = interpreter_->tensor(previous_state_h_idx_)->dims;
   assert(dims_c->data[1] == dims_h->data[1]);
-
-  previous_state_size_ = dims_c->data[1];
-  previous_state_c_.reset(new float[previous_state_size_]());
-  previous_state_h_.reset(new float[previous_state_size_]());
-
-  // Set initial values for previous_state_c and previous_state_h
-  memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_);
-  memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_);
+  state_size_ = dims_c->data[1];
+  assert(state_size_ > 0);
 
   return DS_ERR_OK;
 }
 
-int
-TFLiteModelState::initialize_state()
+void
+TFLiteModelState::copy_vector_to_tensor(const vector<float>& vec,
+                                        int tensor_idx,
+                                        int num_elements)
 {
-  /* Ensure previous_state_{c,h} are not holding previous stream value */
-  memset(previous_state_c_.get(), 0, sizeof(float) * previous_state_size_);
-  memset(previous_state_h_.get(), 0, sizeof(float) * previous_state_size_);
+  float* tensor = interpreter_->typed_tensor<float>(tensor_idx);
+  int i;
+  for (i = 0; i < vec.size(); ++i) {
+    tensor[i] = vec[i];
+  }
+  for (; i < num_elements; ++i) {
+    tensor[i] = 0.f;
+  }
+}
 
-  return DS_ERR_OK;
+void
+TFLiteModelState::copy_tensor_to_vector(int tensor_idx,
+                                        int num_elements,
+                                        vector<float>& vec)
+{
+  float* tensor = interpreter_->typed_tensor<float>(tensor_idx);
+  for (int i = 0; i < num_elements; ++i) {
+    vec.push_back(tensor[i]);
+  }
 }
 
 void
-TFLiteModelState::infer(const float* aMfcc, unsigned int n_frames, vector<float>& logits_output)
+TFLiteModelState::infer(const vector<float>& mfcc,
+                        unsigned int n_frames,
+                        const vector<float>& previous_state_c,
+                        const vector<float>& previous_state_h,
+                        vector<float>& logits_output,
+                        vector<float>& state_c_output,
+                        vector<float>& state_h_output)
 {
   const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank
 
   // Feeding input_node
-  float* input_node = interpreter_->typed_tensor<float>(input_node_idx_);
-  {
-    int i;
-    for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) {
-      input_node[i] = aMfcc[i];
-    }
-    for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) {
-      input_node[i] = 0;
-    }
-  }
-
-  assert(previous_state_size_ > 0);
+  copy_vector_to_tensor(mfcc, input_node_idx_, n_frames*mfcc_feats_per_timestep_);
 
   // Feeding previous_state_c, previous_state_h
-  memcpy(interpreter_->typed_tensor<float>(previous_state_c_idx_), previous_state_c_.get(), sizeof(float) * previous_state_size_);
-  memcpy(interpreter_->typed_tensor<float>(previous_state_h_idx_), previous_state_h_.get(), sizeof(float) * previous_state_size_);
+  assert(previous_state_c.size() == state_size_);
+  copy_vector_to_tensor(previous_state_c, previous_state_c_idx_, state_size_);
+  assert(previous_state_h.size() == state_size_);
+  copy_vector_to_tensor(previous_state_h, previous_state_h_idx_, state_size_);
 
   interpreter_->SetExecutionPlan(acoustic_exec_plan_);
   TfLiteStatus status = interpreter_->Invoke();
@@ -220,25 +223,23 @@ TFLiteModelState::infer(const vector<float>& mfcc,
     return;
   }
 
-  float* outputs = interpreter_->typed_tensor<float>(logits_idx_);
+  copy_tensor_to_vector(logits_idx_, n_frames * BATCH_SIZE * num_classes, logits_output);
 
-  // The CTCDecoder works with log-probs.
-  for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) {
-    logits_output.push_back(outputs[t]);
-  }
+  state_c_output.clear();
+  state_c_output.reserve(state_size_);
+  copy_tensor_to_vector(new_state_c_idx_, state_size_, state_c_output);
 
-  memcpy(previous_state_c_.get(), interpreter_->typed_tensor<float>(new_state_c_idx_), sizeof(float) * previous_state_size_);
-  memcpy(previous_state_h_.get(), interpreter_->typed_tensor<float>(new_state_h_idx_), sizeof(float) * previous_state_size_);
+  state_h_output.clear();
+  state_h_output.reserve(state_size_);
+  copy_tensor_to_vector(new_state_h_idx_, state_size_, state_h_output);
 }
 
 void
-TFLiteModelState::compute_mfcc(const vector<float>& samples, vector<float>& mfcc_output)
+TFLiteModelState::compute_mfcc(const vector<float>& samples,
+                               vector<float>& mfcc_output)
 {
   // Feeding input_node
-  float* input_samples = interpreter_->typed_tensor<float>(input_samples_idx_);
-  for (int i = 0; i < samples.size(); ++i) {
-    input_samples[i] = samples[i];
-  }
+  copy_vector_to_tensor(samples, input_samples_idx_, samples.size());
 
   TfLiteStatus status = interpreter_->SetExecutionPlan(mfcc_exec_plan_);
   if (status != kTfLiteOk) {
@@ -261,8 +262,5 @@ TFLiteModelState::compute_mfcc(const vector<float>& samples,
   }
   assert(num_elements / n_features_ == n_windows);
 
-  float* outputs = interpreter_->typed_tensor<float>(mfccs_idx_);
-  for (int i = 0; i < n_windows * n_features_; ++i) {
-    mfcc_output.push_back(outputs[i]);
-  }
+  copy_tensor_to_vector(mfccs_idx_, n_windows * n_features_, mfcc_output);
 }
diff --git a/native_client/tflitemodelstate.h b/native_client/tflitemodelstate.h
index ee5bfb6a91..3a6d4971e6 100644
--- a/native_client/tflitemodelstate.h
+++ b/native_client/tflitemodelstate.h
@@ -14,10 +14,6 @@ struct TFLiteModelState : public ModelState
   std::unique_ptr<Interpreter> interpreter_;
   std::unique_ptr<FlatBufferModel> fbmodel_;
 
-  size_t previous_state_size_;
-  std::unique_ptr<float[]> previous_state_c_;
-  std::unique_ptr<float[]> previous_state_h_;
-
   int input_node_idx_;
   int previous_state_c_idx_;
   int previous_state_h_idx_;
@@ -40,13 +36,28 @@ struct TFLiteModelState : public ModelState
                    const char* alphabet_path,
                    unsigned int beam_width) override;
 
-  virtual int initialize_state() override;
-
   virtual void compute_mfcc(const std::vector<float>& audio_buffer,
                             std::vector<float>& mfcc_output) override;
 
-  virtual void infer(const float* mfcc, unsigned int n_frames,
-                     std::vector<float>& logits_output) override;
+  virtual void infer(const std::vector<float>& mfcc,
+                     unsigned int n_frames,
+                     const std::vector<float>& previous_state_c,
+                     const std::vector<float>& previous_state_h,
+                     std::vector<float>& logits_output,
+                     std::vector<float>& state_c_output,
+                     std::vector<float>& state_h_output) override;
+
+private:
+  int get_tensor_by_name(const std::vector<int>& list, const char* name);
+  int get_input_tensor_by_name(const char* name);
+  int get_output_tensor_by_name(const char* name);
+  std::vector<int> find_parent_node_ids(int tensor_id);
+  void copy_vector_to_tensor(const std::vector<float>& vec,
+                             int tensor_idx,
+                             int num_elements);
+  void copy_tensor_to_vector(int tensor_idx,
+                             int num_elements,
+                             std::vector<float>& vec);
 };
 
 #endif // TFLITEMODELSTATE_H
diff --git a/native_client/tfmodelstate.cc b/native_client/tfmodelstate.cc
index 866775e4d2..5393ed40ca 100644
--- a/native_client/tfmodelstate.cc
+++ b/native_client/tfmodelstate.cc
@@ -98,6 +98,9 @@ TFModelState::init(const char* model_path,
       n_context_ = (shape.dim(2).size()-1)/2;
       n_features_ = shape.dim(3).size();
       mfcc_feats_per_timestep_ = shape.dim(2).size() * shape.dim(3).size();
+    } else if (node.name() == "previous_state_c") {
+      const auto& shape = node.attr().at("shape").shape();
+      state_size_ = shape.dim(1).size();
     } else if (node.name() == "logits_shape") {
       Tensor logits_shape = Tensor(DT_INT32, TensorShape({3}));
       if (!logits_shape.FromProto(node.attr().at("value").tensor())) {
@@ -134,66 +137,83 @@ TFModelState::init(const char* model_path,
   return DS_ERR_OK;
 }
 
-int
-TFModelState::initialize_state()
+Tensor
+tensor_from_vector(const std::vector<float>& vec, const TensorShape& shape)
 {
-  Status status = session_->Run({}, {}, {"initialize_state"}, nullptr);
-  if (!status.ok()) {
-    std::cerr << "Error running session: " << status << std::endl;
-    return DS_ERR_FAIL_RUN_SESS;
+  Tensor ret(DT_FLOAT, shape);
+  auto ret_mapped = ret.flat<float>();
+  int i;
+  for (i = 0; i < vec.size(); ++i) {
+    ret_mapped(i) = vec[i];
+  }
+  for (; i < shape.num_elements(); ++i) {
+    ret_mapped(i) = 0.f;
   }
+  return ret;
+}
 
-  return DS_ERR_OK;
+void
+copy_tensor_to_vector(const Tensor& tensor, vector<float>& vec, int num_elements = -1)
+{
+  auto tensor_mapped = tensor.flat<float>();
+  if (num_elements == -1) {
+    num_elements = tensor.shape().num_elements();
+  }
+  for (int i = 0; i < num_elements; ++i) {
+    vec.push_back(tensor_mapped(i));
+  }
 }
 
 void
-TFModelState::infer(const float* aMfcc, unsigned int n_frames, vector<float>& logits_output)
+TFModelState::infer(const std::vector<float>& mfcc,
+                    unsigned int n_frames,
+                    const std::vector<float>& previous_state_c,
+                    const std::vector<float>& previous_state_h,
+                    vector<float>& logits_output,
+                    vector<float>& state_c_output,
+                    vector<float>& state_h_output)
 {
   const size_t num_classes = alphabet_->GetSize() + 1; // +1 for blank
 
-  Tensor input(DT_FLOAT, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_}));
-
-  auto input_mapped = input.flat<float>();
-  int i;
-  for (i = 0; i < n_frames*mfcc_feats_per_timestep_; ++i) {
-    input_mapped(i) = aMfcc[i];
-  }
-  for (; i < n_steps_*mfcc_feats_per_timestep_; ++i) {
-    input_mapped(i) = 0.;
-  }
+  Tensor input = tensor_from_vector(mfcc, TensorShape({BATCH_SIZE, n_steps_, 2*n_context_+1, n_features_}));
+  Tensor previous_state_c_t = tensor_from_vector(previous_state_c, TensorShape({BATCH_SIZE, (long long)state_size_}));
+  Tensor previous_state_h_t = tensor_from_vector(previous_state_h, TensorShape({BATCH_SIZE, (long long)state_size_}));
 
   Tensor input_lengths(DT_INT32, TensorShape({1}));
   input_lengths.scalar<int32>()() = n_frames;
 
   vector<Tensor> outputs;
   Status status = session_->Run(
-    {{"input_node", input}, {"input_lengths", input_lengths}},
-    {"logits"}, {}, &outputs);
+    {
+      {"input_node", input},
+      {"input_lengths", input_lengths},
+      {"previous_state_c", previous_state_c_t},
+      {"previous_state_h", previous_state_h_t}
+    },
+    {"logits", "new_state_c", "new_state_h"},
+    {},
+    &outputs);
 
   if (!status.ok()) {
     std::cerr << "Error running session: " << status << "\n";
     return;
   }
 
-  auto logits_mapped = outputs[0].flat<float>();
-  // The CTCDecoder works with log-probs.
-  for (int t = 0; t < n_frames * BATCH_SIZE * num_classes; ++t) {
-    logits_output.push_back(logits_mapped(t));
-  }
+  copy_tensor_to_vector(outputs[0], logits_output, n_frames * BATCH_SIZE * num_classes);
+
+  state_c_output.clear();
+  state_c_output.reserve(state_size_);
+  copy_tensor_to_vector(outputs[1], state_c_output);
+
+  state_h_output.clear();
+  state_h_output.reserve(state_size_);
+  copy_tensor_to_vector(outputs[2], state_h_output);
 }
 
 void
 TFModelState::compute_mfcc(const vector<float>& samples, vector<float>& mfcc_output)
 {
-  Tensor input(DT_FLOAT, TensorShape({audio_win_len_}));
-  auto input_mapped = input.flat<float>();
-  int i;
-  for (i = 0; i < samples.size(); ++i) {
-    input_mapped(i) = samples[i];
-  }
-  for (; i < audio_win_len_; ++i) {
-    input_mapped(i) = 0.f;
-  }
+  Tensor input = tensor_from_vector(samples, TensorShape({audio_win_len_}));
 
   vector<Tensor> outputs;
   Status status = session_->Run({{"input_samples", input}}, {"mfccs"}, {}, &outputs);
@@ -206,9 +226,5 @@ TFModelState::compute_mfcc(const vector<float>& samples, vector<float>& mfcc_out
   // The feature computation graph is hardcoded to one audio length for now
   const int n_windows = 1;
   assert(outputs[0].shape().num_elements() / n_features_ == n_windows);
-
-  auto mfcc_mapped = outputs[0].flat<float>();
-  for (int i = 0; i < n_windows * n_features_; ++i) {
-    mfcc_output.push_back(mfcc_mapped(i));
-  }
+  copy_tensor_to_vector(outputs[0], mfcc_output);
 }
diff --git a/native_client/tfmodelstate.h b/native_client/tfmodelstate.h
index c3dc770855..0ef7dcfe56 100644
--- a/native_client/tfmodelstate.h
+++ b/native_client/tfmodelstate.h
@@ -24,11 +24,13 @@ struct TFModelState : public ModelState
                    const char* alphabet_path,
                    unsigned int beam_width) override;
 
-  virtual int initialize_state() override;
-
-  virtual void infer(const float* mfcc,
+  virtual void infer(const std::vector<float>& mfcc,
                      unsigned int n_frames,
-                     std::vector<float>& logits_output) override;
+                     const std::vector<float>& previous_state_c,
+                     const std::vector<float>& previous_state_h,
+                     std::vector<float>& logits_output,
+                     std::vector<float>& state_c_output,
+                     std::vector<float>& state_h_output) override;
 
   virtual void compute_mfcc(const std::vector<float>& audio_buffer,
                             std::vector<float>& mfcc_output) override;
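Taken together, the interface change above means per-stream LSTM state now lives in two plain float vectors owned by the caller, which passes them both as the previous-state inputs and as the new-state outputs of the same call — exactly what StreamingState::processBatch does with previous_state_c_/previous_state_h_. That aliasing is safe because both implementations copy the input vectors into graph tensors before they clear and refill the output vectors. A caller-side sketch (illustrative only; run_step is not part of the patch, and it assumes native_client/modelstate.h is on the include path):

// Illustrative caller of the reworked ModelState::infer() contract.
#include <vector>
#include "modelstate.h"

void run_step(ModelState* model,
              const std::vector<float>& batch_buffer, // n_steps timesteps of MFCCs
              unsigned int n_steps,
              std::vector<float>& state_c,            // sized to model->state_size_,
              std::vector<float>& state_h,            // zero-initialized per stream
              std::vector<float>& logits)
{
  model->infer(batch_buffer, n_steps,
               state_c, state_h,   // previous state going in
               logits,
               state_c, state_h);  // new state coming back out, in place
}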
From 4b305d2f5ef409f129c4638561f55b3707d9e8dd Mon Sep 17 00:00:00 2001
From: Reuben Morais
Date: Fri, 14 Jun 2019 15:11:21 -0300
Subject: [PATCH 4/6] Remove --use_seq_length flag

---
 DeepSpeech.py                | 4 ++--
 README.md                    | 2 +-
 bin/run-tc-ldc93s1_tflite.sh | 2 +-
 util/flags.py                | 1 -
 4 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/DeepSpeech.py b/DeepSpeech.py
index 7e92e2023e..400a506737 100755
--- a/DeepSpeech.py
+++ b/DeepSpeech.py
@@ -588,7 +588,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
         rnn_impl = rnn_impl_lstmblockfusedcell
 
     logits, layers = create_model(batch_x=input_tensor,
-                                  seq_length=seq_length if FLAGS.use_seq_length else None,
+                                  seq_length=seq_length if not FLAGS.export_tflite else None,
                                   dropout=no_dropout,
                                   previous_state=previous_state,
                                   overlap=False,
@@ -630,7 +630,7 @@ def create_inference_graph(batch_size=1, n_steps=16, tflite=False):
         'input_samples': input_samples,
     }
 
-    if FLAGS.use_seq_length:
+    if not FLAGS.export_tflite:
         inputs.update({'input_lengths': seq_length})
 
     outputs = {
diff --git a/README.md b/README.md
index bf593a2751..91447cfac8 100644
--- a/README.md
+++ b/README.md
@@ -343,7 +343,7 @@ Refer to the corresponding [README.md](native_client/README.md) for information
 
 ### Exporting a model for TFLite
 
-If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the `--nouse_seq_length --export_tflite` flags. If you already have a trained model, you can re-export it for TFLite by running `DeepSpeech.py` again and specifying the same `checkpoint_dir` that you used for training, as well as passing `--nouse_seq_length --export_tflite --export_dir /model/export/destination`.
+If you want to experiment with the TF Lite engine, you need to export a model that is compatible with it, then use the `--export_tflite` flag. If you already have a trained model, you can re-export it for TFLite by running `DeepSpeech.py` again and specifying the same `checkpoint_dir` that you used for training, as well as passing `--export_tflite --export_dir /model/export/destination`.
 
 ### Making a mmap-able model for inference
 
diff --git a/bin/run-tc-ldc93s1_tflite.sh b/bin/run-tc-ldc93s1_tflite.sh
index bab6d7b004..b402d7d954 100755
--- a/bin/run-tc-ldc93s1_tflite.sh
+++ b/bin/run-tc-ldc93s1_tflite.sh
@@ -20,4 +20,4 @@ python -u DeepSpeech.py --noshow_progressbar \
   --export_dir '/tmp/train_tflite' \
   --lm_binary_path 'data/smoke_test/vocab.pruned.lm' \
   --lm_trie_path 'data/smoke_test/vocab.trie' \
-  --export_tflite --nouse_seq_length
+  --export_tflite
diff --git a/util/flags.py b/util/flags.py
index b0f824ffbe..a4cb497953 100644
--- a/util/flags.py
+++ b/util/flags.py
@@ -73,7 +73,6 @@ def create_flags():
     f.DEFINE_string('export_dir', '', 'directory in which exported models are stored - if omitted, the model won\'t get exported')
     f.DEFINE_boolean('remove_export', False, 'whether to remove old exported models')
     f.DEFINE_boolean('export_tflite', False, 'export a graph ready for TF Lite engine')
-    f.DEFINE_boolean('use_seq_length', True, 'have sequence_length in the exported graph(will make tfcompile unhappy)')
     f.DEFINE_integer('n_steps', 16, 'how many timesteps to process at once by the export graph, higher values mean more latency')
     f.DEFINE_string('export_language', '', 'language the model was trained on e.g. "en" or "English". Gets embedded into exported model.')

From ea1422d47b0ba5736ccda19d4bbcabc51c93b70a Mon Sep 17 00:00:00 2001
From: Reuben Morais
Date: Mon, 17 Jun 2019 08:42:18 -0300
Subject: [PATCH 5/6] Document vector/tensor copy functions

---
 native_client/tflitemodelstate.cc | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/native_client/tflitemodelstate.cc b/native_client/tflitemodelstate.cc
index 9af0ae861a..8c61a83f10 100644
--- a/native_client/tflitemodelstate.cc
+++ b/native_client/tflitemodelstate.cc
@@ -170,6 +170,8 @@ TFLiteModelState::init(const char* model_path,
   return DS_ERR_OK;
 }
 
+// Copy contents of vec into the tensor with index tensor_idx.
+// If vec.size() < num_elements, set the remainder of the tensor values to zero.
 void
 TFLiteModelState::copy_vector_to_tensor(const vector<float>& vec,
                                         int tensor_idx,
@@ -185,6 +187,7 @@ TFLiteModelState::copy_vector_to_tensor(const vector<float>& vec,
   }
 }
 
+// Copy num_elements elements from the tensor with index tensor_idx into vec
 void
 TFLiteModelState::copy_tensor_to_vector(int tensor_idx,
                                         int num_elements,

From f12ea5e958716ea9648833fb25cd6b204b45b4e0 Mon Sep 17 00:00:00 2001
From: Reuben Morais
Date: Tue, 18 Jun 2019 19:23:16 -0300
Subject: [PATCH 6/6] Add a test for interleaved/concurrent streams with a
 single model instance

---
 data/smoke_test/new-home-in-the-stars-16k.wav | Bin 0 -> 114794 bytes
 native_client/test/concurrent_streams.py      |  78 ++++++++++++++++++
 taskcluster/tc-python-tests-prod.sh           |   2 +
 taskcluster/tc-tests-utils.sh                 |  21 +++++
 4 files changed, 101 insertions(+)
 create mode 100644 data/smoke_test/new-home-in-the-stars-16k.wav
 create mode 100644 native_client/test/concurrent_streams.py

diff --git a/data/smoke_test/new-home-in-the-stars-16k.wav b/data/smoke_test/new-home-in-the-stars-16k.wav
new file mode 100644
index 0000000000000000000000000000000000000000..bbefd166f5d93f3634d1b3c2a2af90b13081fc04
GIT binary patch
literal 114794
zhM_w|ttYsVFc+K98{j5|>7bLn4t1SPY8~Z<6eh-m;sXbK_r0sV<-G6R^YXj6-a1RT zNY_v3#XNLQakg|4j=$_CdvDtnYl5w!?Tf9fGw2wSAMIJ@D;i7_D=3k~P`0z7P*|I= zlM!#Dr^aw*%fh!i0m1q$Z1bYFR^lvgs~PKFukFl9OL z1XqEVnl77xQ7;NnV1l|*sSh;pd({m)WF@t>wh~4#PumUk-@^J{+zPz)u7hglm-bDY zk4wN=qk|?i1gFhpREVVLE6gBn82^yB8j?*HO(#qXO_ZsZDc3m9Xf`%9b~9Y!8IEH= zGM%Vb)HG0U_QdnmPfC(>OH30s2G;~4d>P&+?yC8stD^IU^R(0GOmKd(cW}7v|Hsi; zfJs%hef-qa?(A%{0lRdAl!T-pB_SaQFOt&T9Z~`UB8qg1bP6Jp(jW*(cQ-7xF*B#n z_v5>`zyy|=GtYD1|9t!^{OZn&0xxGjZ}nV%e&+eT7yVx~ex34eN#@n;3ZaeKFGg>n zgL{DQNZ@|-`}o|sHYUGId7QeJjpUcSBhyRe9h9$E{{HzN=leM=GOcB5C~0BByqItx z(!VBRox7t;RNmncd}itNM)hglHq=q&@(US-DT3P+AzF(h!j=ese3NC%4wnT4k zyfAvBQ(tR#v3zhR&5R{RoOzk9lT`FwUrWkGCwuC>KSlIw?b=!bAl&>>A^p8mIS-O3w@`3qn+1FnfpMJCK#D$re~N_Xoopw ztbq;um4=z+d}8y0euT`~)=q1HHQ$^FTQlAoWaZ&=yp5j3N+`@*pot&pJR*Omiapd` z4c6l0mKld*ubWgrdE%<Zqp12Mv zwdJlN?WtA;^SE{(d<(YgVAi|$dEdQwQ{v6A*T*xWGvo~I)%;h>UtN6V%jl6&C8I{h z%2yRLJg*X7{q-{S<-AvSUTu7{_}$d3ojIGrcl3wWW4Vr}aMauAfpM#HtxE2dyLq1G zX>Zep2#&0#341ZlOqE%P- z($SHk9z=OM7fkW9;P*Kbf^|Z}!(+l@)e(B4ncEsjUTJ zTbv_xaaHh)^rc01^tX!g1$qQlMuq$y|E7q6z7yzxJ9=8XDXwEmic(oFME*jO zm7yo8U4kvM^1q++M$35hDvj4ieOdq2mRH|q9M7op+Q^ue;k>&1>e|cCUgdpR3KXW~ zi}No&fB7II^6eDdMt_Cst3Mjk?Ben{_s4!GIwST%(wXGwv~g*_<$aX5SAli~vhsbG z|3$vO`Q!3e$#*<$cKZ3$3n}*!YsUAD8R+lf{ayZEyld4qUw|l{AT4||$jYmnl;8!n z;LX89r2noDbtk>NhB{DtsU?9!tt3k|(@v)u>O&7$1NJL2nrNh1WSm4zlVRw_QgeV+ zz@A}`qI2gOZ4Ey1h$Uvi^JpUeu)O@cygYNeoLj;v(Jg!FF=&p?Ey?%Z*C6t%$i0yV zB2PtLj{Gn3Uc`IfDBrK%G2VYYQ$6EgtPdzYrLg>lOsiU?&`0T6;gUgr_QdyB-(E%4 z+xperm!n^P@p9(N6E9!CdiLsM#xb-aLta06eJ&&K>+u;b(7rjZ{>%9M)#ld|-+Ynz zAp2;}P}J71tsTN4m+0#n{Z-uC#15%*@-)jkI^T~4=I0j+2lIC;P&WVbd|mS;7Q_jwx{Z~%0;E13dEDzFI zSlz5HRM%>Qv_kqIJw#K&0aR`w+_;TsTs=u&=SjP${j2$qhNnk*KfMfCXaytH?29Hs z$8BB+U2kEKgh`xw=lI0_!_zqdzyJ5r1o^x>*;6{=ePkiH^=|`>qyHBe88suaaKs94 zch3RWWLH=YxD2_dazKezZY$q$^Y#!korcytePsA<&eP0O?_R%tlkoz+a0E!-|1!qC zZvW=-o6T<*yzTIA&)aQpE4=;Z&1$AX`Av&AW#1@owKr99CXUD)kToXfRj|6cLrvDZ znrF?DG+S);jPkXQ$rESh+L`A-x?bQ-!KQ`l6{%KqO2I(|m*utd#^gDfdt7pv36H&c3W4-{0idKbz@f{gz!d)J>hKFV*{)s+nwm zWL-9=f)MXE-x|Yc)_RJ1{sUILk=JJl>ZUGWhds?GBUyj1x%jq$9*FL`Vze@Uv%1-D z;Rx4DQ*`Az3@GY^ytEA7L$!?NeH0!I(qwjxw zfBW6$%vG67v!CVk2%Za@oZzFhzqL~+gi31z)JEzsu)3n!6?KmGKp$lWNNFe~29=*Z zts*M<69em_>&9G7XdZt(u~e>NN%L~uOZq9HH-5ia@iXF=#qW!s8~j`18{*?vy6OML3uaf?2+3$usyLE-k1ebg!lWi^Yo zvPAPI^Ci08vql>2U0s-4B5`+jViKH-E3GB1G+#68Tr)qi7m|1}$oX3+D~^!5!y4_A zHljZ7FAbBfp!w~{YxXH^0RIYK(*EZ$rVdJ6iem%<{LW$w^Wvf+QunUZrW=S@z7;KiUD%}b+jP4zooxyHP9X*6lQ)63D4 zcEy;Fj&zna*ZRWlj{d(AIUd)^rrJy|U@GY+MbWb_wMXNGr{$VCKffF+dtLioIc|@) zFt}0Q$n%jc{15z=e@axfsJc<}{KW!=q6!BJ1cpbA4!p$^JtnH2zm1E@$8iBbuDgnxu7`W+3NjGinU2HFj*0*?}<`_ea6?ogD zX%!f2oH7Kw?}}O7+>3KK7Yd#H`cQh0_VL;b<`tZxzJM*Lt98afvQ^uo{Q`5aMhj{$ z^aOtX_r`G)Lr2ZeXcLO!K5J)3p*~*jd_!_cA{hd)IOQ(UZdjA;-XfJG&6!WT&;@5F z9AA5~KGI15Y)l*B3RGEJG=*dO(KH6wvpdNoQ`N3xyzRjIny`BH{Dm%*UUHATflqH{Rqxtol?uSPTnPd zsoa;E$~$O@S|`0DtvZvgw$n^3-JI2SJ^N?t8>^U=K$F29YXxnAP59~;lU6fPAW6^u zhrIjm(0bL=%Fx#Jj~1u@uGceK(7*R7Y%LBe>pE?9Z*@tZs6ADWs?${$XxmL~i(b&M zXz1s&CS-fUs5LZ2Jjh{E0mUJU7b*Goi0CAG^>tklp6=c3z{s zt`ce6hpgtPZVwv0jJ^5~_-IEMwNaI(!4@aeTJ6D!HIVF!PwD+xhO2UoP(=JplB6Tz zTIpZuiCi7FXa`TJD0`(u>4LZfccMaX))R4-*px<;@^Yl|2^!09*dYz$4WC) zl(m%}%BQZ8C?zT?e<)X#zAja1e-QbB2ow3<$`6ErT|0}uFGnkRNfHCr0zcun&Bb4ZqP_}vou zRsSbixNfdXGnApOCQ4(a4SKOLv@=|!LvSw&*!T2*MbSEUhhM9q)Sq0XHG+yqq_KDm zSJQnduX2|3@uSixc>?d(D}MLMQbDrOzlD9iBJSb8yyDr9l6I4HbCh@DfjyFTzfZ|v zs4bVShuXWnixmC~7VtbU&LxwXOecniGhZS#s4J+`RC#Nr;l5@OA z?dZ!eogKV0ZD@%(Cw*ww9EV<|snSUqD{qzhiC>B1Ngt~L-xLPT7Ua%+GCm=t#)WP+ z3Af!&x<4cBOIEb0qS;=fPc^*8B~G*hC=y;;+h}zyE__2L>jMzTeNJuq7J@YP<<_>S 
zdDz}Z=w-?Ah$q!$3YjltCDNVxD;;V>ght{qQIe{W_g6-CD>?Enaz8rI8q00*MGX*l zl1o>dG>yaJ*WxntA|s^};vKQRSefUifw&AG%Qxa}p_{lA{a_;A;W8u^Epq;||Dl2O zIGHP1&J;9TtH`nWgrxGpc58bnnqY%->?vx1OH3ud+NJCjBn6Cv(cM4--b8-9MQX|? zG$B49A!eU=kh9>lxELQp6dfP~XwNGq@8RU_pbS>Vx<=FdGS2;z>$K||x{PKif)b-V zkwbD*IZm!A4FxwXAXX4tlVwpz7%3{!R(McL7>z%FKJ4_@(n@Ir`T~#G0QJWNZuha= zpjW^hzZVXZ=Jt``gY*2yeg_+MqN2!ntox2GeaNJQG9Wc4?BFqGY=^qG8#p z7|Lp834MsQNi7cnccMPy_D)rhO`T2;IkJe6IUG%yWwWxBuh zJaea_wR`4E^IdUoRW8Zbl-xJA{layonRLO+dCo@rd4J?I+as$KDjX*+(fvc zD)LtH0>(;RnE`*MHT9m-S=sE`<6hrR#$MPd-KOUZ)@*mQ0`A2!BlAmVYx_A~u_f_`@PaU){>)jRIisy6p zSIikBP}?+B)i}2FF_NZlLbT~z7cD5){E(d^?O=P zttWHQLz)EtQA6rkc-thcIqz$f9;Msbr+Q~lzBK&uo(-`tOIf_k%p3V!qJgJ^pRx{4o zDDJ;P_I9hV-3mOr5fel&RLU!ar^0jEsY@`Yogt;53<+!xoP*phO_;i?^L|(1mH%BX z!#Qx11f|9Fc6~%j2Z=+{V!02^jg#f0@@={|k4uNxapsG4m@%G{;1t0p_6QS(g_dxy zTvg#CmVuF`t-c;EIZKlYGZL|IW&?Zd(=&ed}q`DadTs()yUc z%5*7KvyYI(kj=dC2}x)}Xq(;1f5~RDUe9*3&^T!17~@P5HrS?r{(rQgm|el1Zrw)B zuoV5(b7P;;lg+Awb;}A`Zr-QGyjq_)2kfr)eNKa~P05^McCe1JE!-x9?kEYf(P&U^ zS;uJLoT-=8!`eK2_UnvmY@5H*>$sI%#J#YAS?q|5XpwCyY!D*sb!4fPl=9GL`jt}H z^^)d4h0fr?uBvhdZ7o+_`P|vAv!qj$!%xav|r5w!X zpWP^a{F&^pH8>C!NpqN}Mo7Jwvl^4+Rg5ezvdG!0&(RXMfz5iVIE1~rnOI#cCFUW8 z!-9joFI*Ik(L%FPST6iTKIB+ZQhSng(h5eg9$BE3(W4Y&lgul8XuXd^rQ%~+V6@^E zaF{`K(*88Ko*)cvkTkz+K6lw9gnsBj&!lhqJn zdd%)SiD%+lw%#q9pEopVT zfTq5fwZ;6_tYP+IKdy$Ja3Co$y+9prna|A9R)Bk?7N43@^p2k;IUomzQ8b%k0GziP zXGIsDh`V@jHt_lRgwItiK5d&hfqKyqUDh(d*gG@R%|N%m)$B&^WR5x6+y^V&jXbZR z`aJC`eGhZhdNZ$ihd!T4!gy)8GS}Ojw4OlpVYF|NF`Z-12Chc6jVk3gNHrMh^?Q$d zTDeo)e|eU{f3J5Rb0@pophZrW777pSUr+%S;ZNe`&Mj<~Gyh})y^db8I$qy;dW^n} zJ@vI#l0>#H#w@cj)BaPtJ&BF2NN3z^r`mgI(;fyM+l<#?l6lJf1kb@@p7=MMCy(&y z&9oxztMGe|Ezk@yQBMh7!~)VmsjswEj-khHsxn<>AL0~ih?069{m^5$KW>Y2QG&G) zw+p-Io31Jh;5kYmjo?$>m*rqdldL;5vu`ze8uK{kUurqp9c`61RP(W0htzlKY1|MQ zDiU*blh%&*?%f)0D*Xt3UGuc5Xf@WV_0*a8%pZr#;nFOoPE{YOUvS4%WgBV?%2LG4 zM8)?V@8etN0Y26`uDtHwy#=EFjPWIWm)JMg?c^0n19Fv&zZLT&x@L6qs9`=6R<|C# zh;7MZD&}qL$?uu~TbZCN75)ZEC}v+W+vsmdgWF}kz{NP;{E>H9Hh(d)^g{Yebtdh( z57m5pZC&+A5#%roA7(pL-zWi;-I&)v0M_au$GWT#W#E(&*~@QG-~b8y1hFMiDV zmxt`%=1zNSxA7E((0|%O^)+p$R#>I+v==SGDKJb$!WZeHDHQ4uDnvu-%<%J2O88k= zM_Y7CEx>#CoGEUJT1vYaZl?Fu7U~v$>fuQ&){|*BJ>uEN>w?pyUKqmy-~Jzb3Jigb&qlT+%J_M zl}U;wC6Q|Kj=La(PB7Ey$~01sMA)9<7Ul!!J@F3ty*=1%YT3)I1*Txe!SoqgES!9G zwIc2pJ=B)g*|Xu1;RWGx>U`#{Z`33_xykDO@IZW$1H^Ppu;XIiKnZkY` z*DAtD2hp7gBA#2SqcrwjjQT33aN@4S!O5pneo5Jo{Dz+GK!O;5IIzh#-j@>b#@o@g zSQb5by}jH^0)5b6 zEs4JH&iXV`AdeZjt$X%U@<=N&LDl3IxQlP3sW6i4%5LBrHSF*0mR1=wez|B!p9K2U zhIHo{G$Q!Gf0mF>zTY`UPWKda1=WOD^x_^k$$lj2SHg3lc|7NeZ586BJ z6EIKLxW$XHgSzFz;APn^$^Drpx3|07;xuqvxm-2mD5)okBZtq=JrZntamO7&$(4(n z#wEQX^=m)cyME4O`w%X|CNx;C(!PbKjtpmp`iE`@*9M0MAChLI<&;MKSteK*-KUz< zJvc5{Cg=*5%JB!UgGryI6}D=4Q0UK4snDHJgK&ZH4|LUU*FQG5T4eu9+3t(}(lHwo zS0!IbO-Q?vr(N3Z)Wnp8NDPnNwoa@ZumiZKC-5n$H@3f5|)thv{oD|S!g*n(2IFO zI?7$1PkJT_Z0`?+#$pjsWwLlgHtJv8!Z$%S*En^YpV)Lx*>TPUd$wJQt*i*T*D^S7 z3W;grLr0@W#ErV9qjc6?P;q%4qnz$eS5-Nlv@hKIloHBcN(O1Si@^mxlRgr1k$^V` z?0cE8mZaeW&T;Ds*`5XU_5|R+2u{w)$)1xP&N`blku-sZ*$=YM=Cltsr&ByhZN(Wq zUX9{)=mb|;J2)}BV9wWcpf=8Fm3=HHMCw;9lpN)?Gx`AYrrlNOD_3!UJnZ%5`MJN^_2F(U8U!lJPn8@c(c9a%HL^cU*hK&Ss5wz}BF_7geP=wY>FM;z zlcb|$KW}A+`iHx!3TW14yO*`m2+&sBDZDvUI43%La#kC9f)Aodp8u}u`v>p8&U%nl zKWBGN)nL2O&X7OUJ5(}wKWOFb#<|%wvvziB=HASuSxqwU1`p<(4)4&m>0W2Gl`0*; zDOk=o!@oVYM`FX&uT!(~7-^s69hz1uci}vz^V~|!n|Ly|a{Pd}hf%}O4wj63?#U$m zqndkxryjjI^~7zid9)Bd*XwW!|D!()ZB~026}A7-tUUt<#ZH>eOR7I;IV2cF(z@Ob zMl6lJNF(XDxD&*qKg&MN4PTCp^D+tIpRln_5Vnd1g@a5^d!6Af8J9+x|&zZ613oSm9)P(58aMmlB$h`qWjJ zPUiS`++LN1452;qZ!>s;A>geU-1=KU1*h;8UBM+<15BY7y%+W9tgPzk;xjzmqcS4~ 
z`}_Mx`DXy7bFb+^Y)djl*lpDmkf%?GejV~pMS!)F-d^*!9@CAm*phGK)~ zNx?grRT{0!^Y>Zr6EZ)~{5(s^j?I~x^El@dI^c_|yFy*m%b{K2o}obSUT}Ff`h@IX zbL!(f4252VRqc^(o0G|VZX#=n$Cnw^CT4Wflcb+=Z^)CJc0Da2^{dppsmF7NlRk<2 zCw6;WTHM&kp`Jw%eIi$TR*7BYa^5lSEzWwYvRuhUOA9GPEm0u6QwN268+EKjTG#L; zb&YPQ>qD9J@g?e+>HtmCZ}3U-zy=M5|2boS>(nHPtS(vP8FpiuOS_P>Se1N*^5RbM zDH+}SosQxdXNTF-nu2!H&AT<*p2{bykA24}!0nJNxWq1GdTeBOzYPw&i*H|ofgA@< zC~7ye_E`_?63$`HIC?DD-=_#&#bIP~&ZlAMxph19K2qbgNMm>C0^{YAbqSp@2%o&osC39-#g?D4$4|(^+`!(<7 z%r#k8vX~jxde`uN&c#*1l#r2KCwp0Tugv|~b2Im3CFFdOb17tmZ0)&u$Zje9 zpp^Bu4vb8^n=~`!YFbX78hMN6Zk&5$?pJy8rg{^a#n(*yEOBCBy039m+2}TrPn8R< zdj3+8gOneIweDYiN99bjk9}2MEY>zkYmrO|x2XpeJb(YVkXg{=Qe+WaCf$N7#eLdIf!@c3GXpysd=(#%Z=9t8-TE%%bmqdOzv?#rLB!Yh*@{@@(Oc zNDX~O8qMC&tl;j@p`2U66P^D+eV9-?J|?kqLeoH|@2r1m;ABKqHiZ_EaS_#(EOEVOo3E5| z%-$sQQTmBzj9vO5w%=OX$Kkf>O0`;OZ_Y3hymy3_hYG10oM=D&OHh$Q@K4cpiap&} zs9!XnkO06NVQS?e^B&?^vKFk6I?1%fK0`JmScqw}dwZu-Gkk`Q$df^0X z3_ISJ&v|}lDoQP{Jr5p6H3y*ue9105#Quue@EyGY$2ecRipBYS9~9|Uf%Pc z2HT9{reG9DU)EQP(-wqphrbA=1sWSk?mIbDKJ*~mCR`N1_h~gv zYY{F;kL1qKPr)@gk8&oX%6OesEoUP$;}c%nOY|Eoa*v4kIQomk61jd(9hAB|txQ^6 z?q0c{=I)p0R<0T-6^19QiK*dx?fu+eEaEGruej9R#k)to>l~7Q^M32f10&kcHBJ$n zn${kg?FyOA*{SzyA+jO8p&=nLc$UP_#J#l~*Jl^>VuwLC5@9+UvfWh?62&!QL!O{RVm;|Ev5y!QW}_d#P3shr z@(AC{%g7T*j_86rr-(NnD$r@(z21KA;oiHhu&cYvmQKrkq)I{sv69o8W|30%I;)g3 zgS(`leTX#pu%UuXFNXatsoHd)e-XTuJvOUr*4)f%{((W)H$?( zUEy|ERNLXWof~eXP7D=9h4B|>uM79ijqqZ%oqo#bW50Hyr2b$K1!9`VEl%8-R63=9 zo_i^|QcI-X%QHGPKCwZ<)5N8Te+2gVVxpc!jq;Y18Y@q{)!dHr(Anp{@2)3KvrEe* z-0Q`H%!RTv(AkCNGr~Nm4GL8zpK(ska6GEv;7R<`P1P;>3$ThfE8AINzca7XJ@c{f zEjc3Hg;~x`aO3s%9kPO68eNzTM}t?aA_3(g3X0LrJ`y;8WP|GmSgh{0*NG7H*0kNz-}l?~5v*ufedyGkNzWIJH1mck%2TB4^<=N}N>sC+36v z1;st!mBZ*mROq;R!0!GfSnKa#W$le5VE-p z`k&NCswkC`R>8yV7ybjo|5)flqohN}Rezzmbk{vVF5x}q`__9TVpBv3Urm3dh<2V- zUtzDtdww50>ycE=?f}=`8pfctu~eIE7BHG=qm2_Ni4N->)K9b&?PYk9T9j1VJmEc| zuY-$%qsUO|pX1Icl07}=>zvG-alrw>ccC8PW#NCrqtz&_Fe!*#v=i!M*sWJ;S=>Dd zY+!Y?UV3liiP00r&n=vj_PJ1J1p35ui}xmM%+)rvaPlunt&&4|5>mfQD4uXRsZG-7 z(HkPpN1u;=@2w&Aa2@j9Qufm*cg6Le{EOAqj0Mf^V81Zk!cZE84D&Z8j8eur?GJsc z)+y9CR79N;-jwq;cu||H*Hu+@mQ~O3lG5IeKAU@N0BiA^6tTiM3Z7NOmcCl9`F2Wak>;>TZ@qX zlf3fRQUO{L21q*W=p9@E=kQ$2hp`w#jOb=C@KtC4I!I~K59AkXJc}RR`K`40 zG$k`@Gz$7(Xu|tJ(u6o~EBAcQ-blxDH*&l$kGBR|<$B)j?la_xt%gI8g{5?k-6!wz zuztk!nhms8dR?ZEdSFi3>U`r*ZLxM$3#HsxgxLWADV6~j#!O}Uw>{U4{ zas~%8gDvrV7E#-(oz!ggPjwr<`nufnxwL@3M}1F5&bMk?EhGGyeoD({7D9RWgzm<6 zN?Ex^#E_`MF>!I{;|eEFPCA)ub<)?l59H~Wt7KyL(gy|1r|{8zQOf;=2Dl_B(g9OOE8($zLJ`x%E<#)0Meh60 zol$6Jn!;*ZOnP3_VW*_a(#JHhX2Y7kB#Uz`{!dBXA{CaxAg6agHY@OZk)&_8aqjag zoF@A!gEMNdyM=tkeHpFrD5kU8?kb+ap4@b%EOfO{(v?Z{CZ@|7_Bnf%l|hPpvN-_t zU~h9ioMbhS8Tbo2WHp>g}yxL@m)&FKP7Ad!T~)O@FU%&@buBPys$Nj~j#G(!wZL<3&|I zt-OeA9aSLu>zMBGHFK?xUmv$7u~W+9Tuoz>5~h*f_ar(xuq)v}T>nVX(MFE0&YMM@#@0Q}OE%_a+FSa9mF70S*N>n=91cF0sGC{|a>RGgZjlq387vge zt2M%Ry&^nDyTqSn8F}RE%pK?wE7+HvZnQ%U6BdYNrQ@9H6~!hr{`?MpnF2=?O=DSi zbOXP_5zG<}2#4|fe8kPINxzU*H&dQ3A0{8er)-di%2VV8axuA{@#=lX5Ewvk}$)Mx1V$*!ob zch`H9-abPAT`SJds0l~DO?MkDjLXI*qd8OSTXR1%@@>>BBT$!qL>oquy^9TYmTlV| zVOviL&&6%B*EcSrd1Si3ZlF`l{6Om{A@=8lgt)GO&aoF0UnkrM%nWo$*c+D-ImK5a zYEsmD_e!a@YneNrxY+#1I%iifPph@H$|!GTwK#gc+u>v3ed>O+>IK7T+Q-_8@QJV= zOnZ85;v&=94;LDVh1gx114ZiUY1au%SE`4lvFA}J{q9Szj*GxDm(zab>03HPUR9_*G%S+@hGY`ve7&PVSX>f z@atwViGD0?hm%#rdBR{$OHufWO=Lf8erx-+^_~5lwU@K6DpOZ~*uBx#TeM!6tv|UH zqwJH`EdG8ETE6yVq>MwWCs?D*Axyw)nTQLaqv&r|G80ffw6VIP#+=AGa)FX&rWFonYGw!l4IxsXa$#>4v3+-tU&nCQ8f4d&as+34R;1uB= zuE1XQ6`We>)^a0Or^B45J|6#9A@e4#=T2HReW!kl32D1J5iH|>#h}K~-JXt)L$zta)_xlHK{f0Ay_xyXbW(|2_H1L>-^tl~^#hJ;B 
zx*Sh&Jly0C6c$IAl}5_B(SP=k*KsO+&}p2OAIdZ2hVoQ-h}=jn0Iyk4{ujMq7tXH> zoDOx-#$9Ef)Is%f!Q@S0!@U5yREDoe1aB`63Vjg8g@&T?EIQb8yc>JXJ?2TYfGX!$ zF*xJzQDLm({rib~bsPO3Em5oehX$+#y6xGVmrK}*@6izV9Nf3Em`GDD%#+wooB%Jh z9X8(&3O)(eKTht5rl*ptoqK_6xRQqNd7ab`HT@4_GuZH_s7uSiwM~-q$V<=^+{L{% z2xMY6y2vgxlHNlB^CNe9GgQO@T!?b3Aik>u6$>|Bge!F=^+}3V$uoW~fn8opZe-9?} zg`JNS=45)F)=P@#l&2X9JRwhAPZ3vwYqMvK_k`=T@{d>Y3lY;j4I=MH=L%en=p8jK z_I>o5$RhsZ(JunSeWyKld;5wcQ}nQ(ED9-6p-5n|Np%Y2yP$dkby#7*sXqW#LtXI5^(xtQoYVJd-J_6 z=V~>+mk?t?iwnam-W49ue6pKA#YW*cto#7J;{K?*AB(r8@6exIk*<@+RG&m96)a>a zfBM37ob{5rp~UDb-v>{t13q>FjO1U`gtvqk*q83S4h8Hi{OGr=HP#_C14q#W^}x4& zg}p7w2%+%)8I1C4Znu^oTrKn)xD)&6JM_Wmwm;~>Dv+;GM}MfPTDo3?%(c(;PxL=g zg)6wkh8nM#vc}@Ro5yK3&RlG6=hkn)9NLGb*&RZba+=iL6Rz@}D7Txmx{ElmpSzQN zzxpToe~7FY6&t%Z=62NNz_Hkou`i-3N1Y2C3z(4;Bj({Y&hH)N9_UH-M!CO}D=3v+ zo#ii4sN4#o{TKZr{@yD_IrBf*f;elmW%8LXffrS#8Pv~N)!*?mVbfWUuTo}a z=?N-R2K?!Upoo8>4>Q2`9dQO{z*92J?~~2?8)@f0>2p5kjoGA%v*X_X|1)8nSe1Ki zh?vf4(F#Y1AU@*B>&|pt2MtU=o|b<&5d~Zi!+2Kq+l9DYuXAf=!paQc3_Hy;_964o zM-R_45Ycp=>yF@BgSmhA^UP-By{o{l)ef%uD9_h^SiXbYQ+{r)Jp8UB#4#v5YKYCn zay(-$p0^W1w%Ai1hvM~;Jdbm>4ILD#IJM59hii(G@Qys4Y_tryF6Zo@oK{Dqn`D7r zWs;l%o4cLaz8zD}eiS&XxX}uj%M6!sA0O*0eA8XDYTO53;Wm1!MxYV-OFgWfK|NYj zZK76Fv%-bcG3wv!S5v_?K4#{+ujezGa=(2KDt8hjYl6{_T(xerEABUs;JnU_Gj5tw zM9OfjQ|c>I+^^gR`D-PQ<~`{7+dC=hx4=DrsX$_!62Bzc4rIjch#ef*A8^G!jZTl+ z9F-b<%YVuD(s#o5#$Ao3#qvt3)Q3*R63$#Jo$cvQydzh*Stn}8w14&fS}IzWR@!p1 z5$A^=hHH?{+m=l6{Mh>KGg*yy=^w!@MoNE@ z9})fkyV*ja0Lr&R!XVzs{GkhZC4*qooL}L$p}$2UH`%R-^(%V4;lI&p50jI zEgRxKFrX*)-?nO3gIP@FY3|Iqv=AlOUT)%7PF0lH$54eG1{YreFSQK@z5=Y33RCu- zFq+fn5GUhGZqveIQ=ZtO;#l}xL%JuGmfI-0^j0b^C&74*LD%{M82i2TQp7a{^k-AY`to~0e zgj3ZF>*4HhZqj`ogjd4`_f+?R$+$IDP1crcLzvftnwQ(}NBsJ4^~HK_QstH!4a`_h zwQAgAWrW)JJod=%>0x^4F7A5hZWz%gVw|t9|3>tHK$)m^(R%EtnC($JqhsQ7#iR#{ z#Vm~dH?YDl27Zd#;~V3d;9cbDq0E$rE0g3Yd~#f3iu0B|d$#q~*g(tYUA+`vhnq}> zC!}ocRkwq!H&fMcB1z!`(claV{V)6}$vPLq-9NaG)aP1)S=SsyV&F9MvRT%C@j;Ek zwCq6#m&V-N9#>dlF$IsqNit*e;C$XeOJ7_0j?_YG&AI+sy2Nf&Pr3vHGD?n@e_`Hv z%Dl0JZ9N)Xt~t}M;gkS}T`H{O968D4Ta?r5OZyMYusYbWY{vVno7P3nxHzVNFN|4F zd@HT(C=^LExVL^rGq9Si|6k6J0iwp%m`ln78&ecaw>XN|+NA2(j9hz2+lCaJ=oVRu-kV{$W3d=GVerxTVDz()EAWb?g~;RoGsU!jq+*NXVONe zjafhZueRJ6Xcu#88TE1h{R>pGGPHrl&t+OAqk`Um^q-Ykb+b-Yd?>(X2sv+P|H6>i;$mQvb}b3`;py_ z#O+xhCU7AOj;&|JLHLY{?rq8)ckYOuXu-G(8^#})kCk=p>SN-7oHe4gNs7PLVLr@ zStN?1g!?=c2oIp6wwo~sP19&?vsMcV;M1_ImelG~7GPSobAn?a7!QQ_q=3c^8*(!26xTnqo9ow15K zBP~B!B9Sx;IaWUSvCe`ZmBi;U+KGX4p5!dz9B6^w{W^Mq$=v#FP|u_ZeZ>}123oV% zPLenezO9pdS9&hykvgL786w89Hx!XJfUkcfKA?H{E$8A z5C_=(?Q7^r53qBVv0mfFYz=<%1N)F{oiPjJRjCT9a|F-h5$=t~Oc90Q-gn|?tV&v2 z5KLvQoL}jsG*aHmhvbU#5~iIPX)@DI8F3$H!c9C$XFf3YJl7T3VUB`)JOvFL??l7S z7sqS;w|s$C(~YvNOof47gV)3DPNDCrwOkray%(mbvSiTRdjhq^74~SK^gKxJ}AETp!^(SZ3krxJ12x`XaK+QjPty79rwKQwu)Hoy&h31a4xVSva|nq zR7}*ch-iP^=*++={|*0@KwJM3-#zbb-z7SbCV2LGLL@(A!GHE+%jhE{Ig4!(y~kD4 zXMV`UZGy9=uKlU?KyPe(Y5s-QZZO$Wd-d=2iJ)cqbU|;Ut0;e4gX9l~r>ue>_$+DX z(Tg8u z(@9=@oiRcLrx8yxzuJeX`ghVHUUPo@%h&LekXLFzUrY#;QkUvWyQE<>JB(6(P*%w0 zm7?-<{4n)7gKFZ_?25x?5cBg+n2kC3bUt$yzW@nL3?4dXL`L zIQyYB)w#;8mn7|xb$p4X-8Uo7`)@_;jLaK28PNRS1#ZMf#^;NRi(j6&D&b)4tC*d! 
zf5aq5=ZkJgBShcGA0wiC`P{#`dbuq54SB6!%VmT+C|eHL{b58cYo7TH`^S4@wYDbw zHL1RTqXV6z{;oDaEf7NWaRI%@YFbAY(T3O`$6f+0cr9Qx{zB0lGLDhvPV$ z3nQN>WWn?0V;^1wKBuGFo5Ixj8%V@SRC^^!`_$ka{O~+enf6ynH<(CQg6J-VwXMN~ zmWNFtS)2vB)r=Gm0kvl#Zs=X8cK9H9yoqXO{7~aiV$RcxlN9sH+TsXu1g@WvQVZo@B|}-}?dLD!9~tS5 zS`sJ~*b$XK=I6M|@r~k3B+9u~CmfHv9v2;dKlVaQPE4Jcje+V>4q$cIcNlPbSe5_bPne07V_K^0;@n+CqHp80Gr&%;Z4wtvH| z8RlO354Js?d%V8%r*sHMlC~lXQk``2gL6x20?pHs_f2>!T4BMMu~wd@Gbh z$=d{dKq=>Yv>ba$^DJ*SW`gvhKV5452k+F$I?becAGGr(NX>0LU=QtbAD$Ley_JQ} z;rjM5ofhKm+9ST^$-D(#^DRozmYg7)#Pc-wU1aX4p(M#0XmhAjSPQRI%W0Wv6|JJy zmd5wt`hCMT$JmXWjp8x6BP@t4S5povUEHhD>irs-*MBdnLZC`ix4_Jp{BeEbuEy_8 zte5yh{D`<-am!*m$CQkz7}GDhPM|PcD}cZOc`wU4AXCOcRpxH{)I(pK9CqeJ~d??V~kQtBUQ6)bWf z{?ek1|I8-f#?P%;IFp~?R{$Xs&ZCdM#XDJ!%14Xm!eP zr-$*r<;DS@A&-?Ovi;ux1xS}$feQcfL5~IUpTU0GnNM>=ru}uC;OoK8!|X#xQChsT z-h+O(<({h#z8wh`-rZWkGoV^F|3^K0s(l@;$8X?jRk;)1fSm+jqI!@_6)XKu>I5s@ zlP7MxR0T9Yj_>_Yi*8a{^XdOd8H~%n1a7O=uBNU*u3fGUIE(~kJ&OI0m3MNMvR!$K zKXe1WsheP2D|klBGL>dneau6~ui(_RXbKByQ?z2JsRydl!?nXlLaRbqsG$xsHx3O6 zp+`aJU|!SN+zl&W0ly9Rh7TOb=4OPafJNR=yYcfA@w9XYkF05CnLEhCM0Kuwc%@F^ z(9G?s;_*gS_NV)^{DT9_0>4E49Vil0H}-by?D%Z@-4DlYj9me~^-XkM5Ymr$659HY zMYQn#OWxL0vet9RTJOdCQ$d{K^kqAqX;y`2^ntr)YERUQXpo&yR#2sd>V2?LXa~v9 zfAjoC1oH=f2#TTkG?ENZYifDGP!E9j4%3$#`K@DgrtPy=vJX@PdC!j@*e6~?HC2{< zt_0rxy6lvdq=xXqUGdnqmzQz3-r}=AlzIQOtjd)Xi9cTv`L(o%jN?B1pDE0li_mI$ zL5@lZi#ZEg!0)-@8QayL)GHEE(eNFK?PQ&O7AC+Zb^bw4(u@9kuJ7`SnW*0)tg1ypg9 zQJ*(wAHSsO>OCfdvT7tg=2!SL_QCy*2oEFwd0#j}^{K01gEL7fdK@0eJ<=DBu%p(J zXJC}}ggvk}nJX8_i2ffs)TQj%gM`;KyzG}7D~oCNTI{aqE9+18&+uQ3QUgZ=!vfjS z7h~$jZjH?se=h!U+^X1lu`#h?%%6cKfyz+{elgM;kwwReN^X23-kX1Mrr%{hKPZy*$z1CH~ zr)L;laX7|tH|&CWn?o{T3!yqJ%S&$M#whSqe5|wahpp#bzKpwZAu7I}a(V8AA^4gL z%16=JsM21PBWXC%x6+O=7Qc)~I*VhhGkB1Q-?K7FDECls6mc^7J*wD+VA(1#nVh$_ z@FXc-*UOsENwJ@Vg!Akn`9Y_4@)T47z1&Fy%;z*_{)bcdy6`3UQj}O*{*w2y z8EnWFzM`d46?~S<>F!*^k3X=J4wAD-jou+Ilo!c^VSYd2gm{LZ`yVEno%jnoND8mw zSP=EyOsUgJYdOV?aK!$~zDj3C5o?#ZkbR=5@k}p{pKXlR0j0=RTx?&e)!8do)L zjb+BUDL3#O^Ze}n(bGL*TtvOdZjqXQV)V9{Z86Pa3&hTkE*|(j@JGy!=-jfSbg?=m_ zH^=~do_E;`dh&GnNJ)=^^|;BWJl$?>T`*soNo;V>&EYi9`pl2nTmAo`6bfoQUy;4ni1`lY;SHe{OowGcOGsL}6P!e|Sb4f5zW$JO9@-JyL64RzG>Am~>Ey=u41FF-3ttG9 z2>%sqj~=vn_()hJOQSYQkQ{wB32$9Uw%o#2A1{ovONhxrFZp|Ak$a|lVoXx5+_~(y zt+9)f>n8R~lyhY#?@o?NIhax)wM2q1P>jUgPJZ22+?(55+Py=GS6VoajKB52j6&uY zRz7>6-R(D|9<>t1ypn&*texLxn*bG74oLEMh>jq4r$TVZo2(ng_ts}9GTxwfC`jhg zEom@GIIYDC;y;p~ZQ+jcN-pT#8sUk2?2q_{cvsvPR@EfBwkV{sR&HaDM*J zv(mjmnV|HSSAbd{c6W8nbk*h+{zn`sjlsw82Rw8?`=Oa)R$&rZuWI2E;nSgTaBt{E zZ~_j(XW_lf0O!I3wYsVwhe9!JnDL1%SsnDpZ0@z3g~C>8i1-Pu;x74~OLgV*Om#y2-M)OZ2{JO4!Vn~Fv;ifqYXg^vDT&o7-n@h`0~%}i~F1uVHn>l zk%)Z}uU@j%1BFNf=PaI#w``R8gz>Oo_v{n)eEgkJ=-cYTu3mEDg(@H*&z*(hCT7tiO**{2E_-OGR;w#ou;Dy2XtWhF95ZR};c!0c)SL!2HI_XZ>v4CRd@G7GpfK z8^U)dIqS^L_9AnReg=hFs@4a@RMkZ3o%xxuO1x#~GrC)C#AWIpqoKG8q`Zo8QhaHy zv}Q01Uk8!BFIEB9Xe$0bg5vn8U2@JOuedLrkTj;{u)k}L9HC!M@2Lz#mDdak_8tRS7T zf3Q1{Fw@_?T6+lEw^8jb6jEyIjjX|5w-E$0)L=2ckVhHm_9@qGU9s*-1FQ!A#jMIait-B;LSW#y9V->f!1 zNjPH+^}W|Sle<5`-P8EUH%WY?Z*wOE3kzO*gL%`^ot>!jHVa*}Z|z7kLeEq_3IA!% zaczBHSqV9-%<`xN%i#F>TS>Mz2rj2Gso3fE9p7SMu~ov?Pw&R!hP>uEIYf)mmr6DLZqVgwr8W+~@x7Gxt4%zE#RA%w z;yeAk^rLW4j5Ag_%#UyNSnyTIfuO)uhpua>jUx?V}u)OIdS?Wg}Bt-xP1+U10&k`dQk z@8lF`_UqvkPz39l{G-;0J!ysUOj^o}(h&Bzt&=Rz)6(R}Ontia*i3R~i)l9O8T`X9 zP7isu>GJ$-i&{0m$4)eVl#7U(u}%sIxwIqlZ^Fy0LvdBqmpL~B5749hCp~n&)+);X z$I)4US8;V+czou1Jb@s=-Q8V_ySqCd?i4K)ch}~w{0|S50=XGE zbLO1A*Lv4s*(01m{kEJp3~^0Rvl+8RyGlPNwkY?U^`tIjep_>;v(VbC z41HK{vk)y1lla2=clt5aSl;frp)6I_$9zymYm;nK%$<>4QFWB-#yQ(7eSp!1cx_wn 
z{b>gcBXXYl$~FxRHp?(&#V@*tP1iOlb?L48U;J|4B-hx;SaY601gCI~h_=uZ9~F?I zU{)ni{`f-t6q9v}WoWFGL1%xZI5EM^`&-dPVh;y%5z9SyLNmE7 zTy1qf(NowXZzP-RMX)a3s?Vpt8AHfQbWS5vi*?-#(@;AmX%W32v?KSmA}kcpY5trTmQxKe!8uPksJI-4xJ~y8eU-E3RO*H`RO7iyrfJnMvvK*AbJ|f??a&}$ z2a;bFv){tox#vtr^$j^j*rCc=SNFKk5qdc{PD`LN&4=tIsRgYIljyYLAR|7Fb{S z)&7>{p6bufeiVMF!%Zizkr8!=wk!H&pS+rk#zNkW4P5RyoGKrybdqcU-0{_!GXQpX>P4=ziqGFL9fW{Rbtpp;(3-M zM$4nn4{2idMXG0}ah9p4j08KVBbXd>plD->ZNwuvWT|{h?YS|6^J-CM2PVij@XH>` z^oH(vJn-kOx32N+mC;srdJMQzAGq@RXDhqi9_ht3w!Ix^`q;<#lU6=$ zm9?3DLtjL-FEzSe5NYNfUu2eX%xQ~^H_pbK$EX3d_LV~RAnz(mDEREN2xnc z$UXV>`u~FKqF#kJC_6kkMD*nxOTz{E=}b_Tp^@9EJ*LW9b(!Z8ny>1N%FG_s&|DXu zX8UelS8CF-t%Q$tG^ASw&p2LNE!3{o8pmL%g3-rbQ+#Y}=I*IwsTx9;aBF_P^H_R; zn5%M(_RiBV*phrfx3cNKjzn!Hvj={-?`rj|)zmX>nw6g}D=#K8$YDmN=!vNlN7_3q8^2-2o`-Zbc&lw%^?4YG;@y8XlX#q>BuNzt9ym|r@GNrMSe&U zLX$vzR1NEqJeR7;PNG(deQiU?AI5g$BD>$Zj%TAi`(FKzjJ3X~4&+ET(;p}gJeqnz z9AIBZf0uV@*PQ=TcWM`iMrJ%)DKyS=QF4o4oYO;X$gAx9NGl?W>%+A1?+1;uh&PQJ zXzZdXxr%jOT*34uJB0>1*Mv6O`<4jCzVzbDZ-jFMk9OW-8v#LNU=WnJ6_>(imkghraUc z#Hv3NC>!iiy3!zFiu%P{GMn2>&q$1$VQdU`jy|YtW-<+*c~i+v^&vh;oA@KjW3#xv zkJq4BKEjuR-;ty}Y_h2FHmYP~B>z8lf9M)r)H5=*v3mpCG;2xJ6@@Ym5ESbA52>pr zTt@D-QW3o!|4h2!>LV9c@3^1pKFOr!+ftR&>M|QNr@F`1+l*5k_Ul@v(m}}1uwpYJ zBy3a%5r3JD`D-fbjNGxTZR~B^{+|=L;qKkP+A#OsoBglN3!d+o=?4ngR9^i|_UB{6 zLz!meH0?XNO8VQ;h#H!yCA`&Lf!(eya!~vfrATkYCW(K9UTg0i^R(}JHM>VF?@+`= zdI$SzsRoMo;dot&{vH!H@D%pfw!U_LRI4>r2)%c1 zR)rI#C%?hF%@HDtr3u*%q_a%D=vH5ga?_&Dr=B9$x@Uz;5Gi^+KHcc%n-Mof-4&rc zUBFnW=JX za*XQ1Aq@e$w0m}!_E<}yOIW*=BAhQg-FA$Kz}oyn*sbuvH_2N1BeK7%WY#D9JbsbC zJO4LbH$0j5(hUQ4VKRFx=ykl2m!kGtO{P78VCmrKw_&Rm+RX(Y@s7`WE_?UX|{t1=v}lhv@G3j7;h2LL58W=u8x~D%$h- z9Nb#_r%-)R_O7dg$48-r{iFSAA4BCBbg51WEIl!>fo;rx5gVCBZ7q$1;T(=; z>M^=LlqU^rT4XOe$5n%qtuj%?1D*9U&bCl`wL#Z%C6!0ws2R**lOb>M52c#gv)E;s zSNUx2xxr5STV}2rMQ)--iHz;DUHtWD)Ixr0=x=%fU!Ts%{OsDSMYPe*J91k?<|XNX zHi7Ghcm8*x730) zi!jB0s|EQDp&QN*nkueDa$j<=9P`;$E)rI)i>(c{hDoLk*&ULZ!&QxuQQrv9K@oLwDlOHf7j zsZmJXOy6hZNRm3p{Wd%TtMF&RKZW=72K9+?(s@A15ia9c#}>(WX8%GjjszVQ zMlNOp>T4cPNt45Cv-4>P;E7U!pX!m5MX}9%u zm>=g#CHbqUWC%oxQIrcPM~xTkBXt*lQS&PK?7OU8kqT^O;Y6r0m(KQ8+LMj==GqFn z8!Ov=4%%mGe&s6f*LKOz9E@Dq3JH`N zgS}!NGfSI7_5p8?(c<`<%2DcM%?UTn z=^E^W>Mon)DomtJh0K?i>jR2)C(!fHOcBAub0 zqz+r<%>mRxqOU%gP9&?MzM5&aAU+tn^?;T&&U!$NGb({s`^%h29JG33HJ6_XS+Dhe zlm&vxS-liJ2Qm6gw9FrjXXrdHzmcTrcqM2-RJ z*GH@}XT!a;A60<8RDZIHQJemqK-S$_LlLMg=lxHmOk~y`GT)&F($H*M*FHTgp`lDNZ8fyC|;8V3H;z$?Hnfm2)Wq*y$ZeZv zwIVdI-ktCmRz`|SEfARq^aL#;Xr94$&Sm{((C@V_;tt-yG1h=4emJ4v>@TC!*A%p$ zD{wt%R$g3_#}-F&sN(D+N}~=F{;h5W>GmXar^z^vX#|I_u@R4~o#an5Klu&SoNK6& z%!DG~4X$n)Qb!k{H&>t71)a%$XugW#tP6urJ{HQGyEx)oSVQNry5rLmhDFm-^Yoafd%{&|!`=KWp3KjTN;<*)vYrPk>%2zng>d>2vCq&%a z=^*NSv&J9=eHC=FRX_m?SmWUuTSolApAW=W^d-LH=Uc4aZMYhL<2o$@(>e!q*u8Ni z?W`HZSKR+DL|gMJ{6Kfip16w5Q7dYJbdR#cWgJg#s2LbI3D)9xb3rFt7o)g2#-W4! z&*}7dyak`$k@yV1QyjS&jHPxskCxE%9YsdLNsI>(3f<9! 
zfzj6j$Gi*Z%!5h0TzU!afB57++&v>+y!+YRuX z^|$JPveOHkfr`J+5l4O=XTB0oUooOAe1~gs*JIF|Sqv8CCXjW0r=}do**`{Y@EUSx zENGNje9cf?^;GL8K79e<#!Ock8m7&-`welm3qj>x1Rm$1#4N%^s@5y~>EGaKcgH*b z9j^K@bex~y8ZN>2KZ_nv146yTaW^iSDdceItn-rJ%}b~^-h?;kI>wFzSH*yrX$E?m z8*!&g<0$T-W_cW=GXZl$TX-+4V>EVzLhCEWW+TiDi!nm?!0qEgMRX>JwVQB-O|Uku z;U@taVPWEbINon~7B7NcvIn0x81>WqLj!*phppAnoBzV6b%q+~0(9Ix@En%GkJ0G;4aB_r22WFO{F`{VEM55FL4wOk%p9GF zx)`e|L|4$quiz@&!gxH7Pr8olk%0TT7wWZ=cp?*VA6MdvxpB6yFg{YjuUd*{n!pNa zCC1M>JaGju9$nBP9>EN@2-K=Y@G9oOowx!%`Y80CW@DypjajxeW{WrYH%l;*N)VlK zZpFbdI0ko_6NEDYBhQCtxfE*A&+(j`#{A>K|G&d`jEB+*6(^!7zA8VSfm84{-k|cs zCs>VQp>b^son0EVm?=miY=eIr!oO{U8FaVR1IPIpPt+#d$!PG>2H?Ij7?F)I$JK#i zf(EUyJHBo({>B0KWL{jSbGT=X@U;_geED#GEAf3r^uJnS7OjrY+m1KHTFk~>Fy}h) z#1@86-ihm98=d#s7(25t4lcw0AwySj1M}+auzdG9^&^{&W86RhwC;SzcLtsZn%|aLo@l?!#ReSj;>QNjDex}FF|&I@?<@} z$4SguE0JJb80&+97@IlqC%deI7>Nl)9Nt)K;m`VovGW>TtuAB=KBYYvx%r@DgW3w- zh7-7NgP|7ci4kPsZ?0Ka@J!FZJIseGF$=2iJ{UnKaQ1eLw8NO4r<<9C&pc%9GyNb^ z?XvuMii^7^1y338P^EF%M7A23+O!@VRB-di25g>5bHdQ`UdTq26f@r8u1L z0FVxz;u?zZ+aocMD#QJWGv@8**zrhZnP!v|{y~lPIp)Fb+Ci(eT{KiB!gO?2wfZUP z#9=1R+G^PJ>F}`jGrNFvf_H~C%zP*xH^#!DJd*uIHj!oH7Tw7>qK{z0<{s9@HzvPY z&xx1x3|#vvn8&vpU!e$KOiP`rJq69Xf;ETE3$<`-T%}5CHZn$dfGZ4+Ia^sR3|5jy z8>m(=4~AQbd!?!2^w8NzV{J*84kbrQ%jK1V=4|SL!3J{M|8wOg8*mjIEm<%3($mt` z)zv4@9+|j(hf6_7OIhP?@XFTqQpepU|g%ZnW1#kQF9M zr6N_rX`sUV3=|C>4%7`C52OTU_-6;Y`3v|SXQlYI`BwSM237|C@W1dC@gL9X4<6nb z-!y+za9ij=xKpH_G*>R8<^>V^urb3LOE%_yIQh8MIcMfwT;OSen+0z0$t^N=Q4IwmW@e zW*uL2Fey@APS(mB)vcXWGv4XA?0;zH7JUP8kj~BZ5MJ6^JMKHHxc9khgL*v9wZO^Qe{iel zxCh z0R~#vU}j)afDZid{hc-2+dh-ZT;*-)d*@#h8X_*#DpHdiwc{Fy?>1L&872jXH zQ;BPZs^+-Y zd?oKtbDN#$p2APZSx=t0a)|{KQ{#NmH9EWoks>5m(DDuzmXJi((q_S)LQy0q%OTSb5kR@ zH9}RJB3OJop@U#>ZdRw4fb89l4p7_4ho~ud3=X{KtIBv}qn?%Ls7UJ7uA&ax3hK&= z@@Mf%q<-X6cviS_BphxV-W19R)(W->4EEc7x4p^U?B0IfC~sG918<3}4F7;om&h@> zwe?;29J4J)mm-794XK!3ws0w{*q@&NT#PLTT@-7!g+@f-r5usQf$SOiQa^v6 z{NvjX;nzuTzetQRhB@x26#Xma4K%q+V_teDJDv&#UzYF6#-JB2TE#V4d>ty~4`h^2 zo0leM{No!R9;Z$x8nXFq&zuo=A9q(LV&rXogvZ=j_9WAr9!l=Ez8Htp{bI#Xhd^C_ zVSn4eme3Dzr<%*kNzdmR3l3Xd+a=*WKOGd|4#*eY1jpZZP!Sd(nGd}jxu1A9yeRCC zG?%}tKg_1&Ft(rYpS_3E={n|gINBn2Et`F;^8$$C-|bDDDX!U$=NwNSH@548G_O2Y z`YdnLKA9`YF!cdG8iE-`-Ng>Sgz1BJGz7is5H%z_rOuI|;hCX|fu32kH;4C*m-F@Z zUkF?YHVW+xT?$d5YQgpXTUi^u%eNy4qTurQs{9H|QTvKhg1rJYL!-s2%4~I_f|Rmwe0XVic_gfyA&c8~xf{gS z&p9`5pOp7WC!@DHp3_&A_x`3CWzvrPs+@k$%LUE_--iB;BuI9NmF~*(wO?i$UC2>2 zCVxV`#QzdUCT@;x=DbGxv^tU8e$G2FbELP9uYd5c*jDX@tjWtr^sEkg=1uY*Gn3z8 zpYA>v9gJBQ9plbp%TAxw4~UQb{W9OCJx==16~NE6ttcewt(lvmzVA4;Fgb zW1YQR%Un*^Sw~;nX7)0%PP;C43w`u&^F8$y3E0DLrLWpikXi0BZMpjVBfgyA5iamM z_&dDGU*sM9Mz$S&3#kZ#R#09SSraM|ey4UMudpSBW6t+6YZJC7=7{~|oW`fH=g6FT zRcS!P7db8Mg+}wBBB{gm78!+M4S{?&M@8F4cXXpKZEp8chDxfv358ybeDWp2EMbX| z+ip3ZxEpwOc`kXTxH~(VbBnC(a-%^1%yMa&snt@a|2piAgzBmc5n&z*Zbw;XLn!B8 z*^-5LelXXbE66os>(hS`yNzmUL$QCLg!gfJ<@DtlW3%oCv&ps13(QA*7tg}znlbaE z?|BkjZu?hmH~o}|Kw(q`DXWRv8O0_~6PraEhW`!a4rfM6E5i(iTF!m7M_i7m7g4I` zma7NScw^{8)@*&CS{ZDCe)4Bj=^rcqf$dWRRL+8WNh8$^6ZM%PLN#Z<*q6yeb1h5W z8=p6(x%-|li(DO9lvz4G!Mog->far#D7`hBGe7tX++g-Ivy53qH9@6dE2u*^;fNoi z+?8ucMM777?XwPM#reu)&Ck+&6+#W9B&D7_MeGqN7Fimq?=SDYh78y!?u=lZVf6oJWuh6VpGcfxEWzqU{}5kI7HX|ILWh1?`(WMvM#R56<#0_Z9L# z4*VHTlGp2R$wa=5qnf)w)S0M%J)2#F?PIxIRFavX-IGg5HIc~uNZcTuksm5|)XC7i zMMGm$j>t*1VYYEUg5buPKh37{SC6BUED`<8gJXC8s z0oh}<*+EnzxLlW9T?uw*>|wQUdRZ0Cy4BaX=R@U3u4_)cg@=oHdRZ%4wS3;f3+$^`ho%Ss=S z%$Ykd%s1W}&M2ADEHgT*ynkA7aQLqHT)L=yH9B(7qYmY$S9oigQ_o;+%fv8)SE3^u0=u#nWT)9MwDpV-CkyQ7zry?Pd79bW6+9 zW+|88niwEH10B5x#%oOEHL`E(i{<3y>ONy6d5pboEAKk%`5NVo+UMb1MQjt87uIgA 
zxI8NIBlIYAK0I06FXz;n7!K@^#*l@+fxzP@WEu$R~U<|QB*~i;Scer`TqQNE)Ofvy$H^%u2)i5%jKkX;sW@-C@C3U{_9dd zB#n*%E&2>nXul#2Vv(F(?k+tQosqMlgy2iR=!^0RzFk>ueObPQV5M+5FpH-td5!W+ zQFl`Iw*~u`nq2-)xr}l}%1taWFz>Z!i4M!bKuTu&U!Q(7P92>wG%#+)T`Uqg2tI8m@uGZ0D~}Gn zi_f%|aA$e4JPX}ZopX`Dy^b7e@aiz>PNa9_PGq}SUM`}Rf}XN6sw(kN2fm}W(lePE z>})t7vk9$)ATM*B+0WE->yUm#`6xaPFAcQ~RSrAFf90QAqLrIE$}Hm^@->C+f+W;I zLf0%IQRpr#x4pLIw=L)H)APv1)@{S4ZIlbc{eM=mw7TX{q8QzXJ<7%N9k>r*DsLtR zoAvOPdC_ljmSv@dSR3ic&+Vu5?HI99b8xAL<`$ z5;*C9?aRtK?_29n2yG0v7ne%BQUh!(WLTS%>09=H;@{?3U93>q@@2`gcgwshu{;0F z_=?;@MfFSRgMa<~v(~Tsna@KLb%X58EJlh~2W}iQ30Ygi>CJ3z+YML0n8)#B6TJ!b zVrgd+`m|a-I4*6=mrfrxeVF=Xz|X;1WyP?eG0~1?o_A3lJ3HZyF!_ifav{PwxZ?2_q-0+ z8LD>He3}cKmpyo!ca)YzyrKKSH-Yv6)1NP}A<#J3HYqsY@dQ{&%q!_@PE`N_iV(wGMoWT!c<= zIh^lNq}N)4B(84IbQA1MTX#>jxFLxfl0GN;;{JATWhdxk!UMcZetN(C^R04fM!M#o zB-J;LkWTh6-_CYbaPSY1ekE`eze><-H|!_vKW(YJllfwHL$9>7uUE!`w3PHcnZ5m8 zBOVPEEq1Vtaz1m4j_UUA!g#I)Glv>aCc2h2au0vN-)Uty(JW!g2ob%z}-YUufkOgnT?c##(=TG#R?tv^? zls>@Z}5myl&;+uKE>FwPzq_p&c9;W#or)H|3L3K^?AkR_`gPplNg!8^8tk zEHpM0h2)4;p{t=$;RE5h;rrn+;dh}n;mYBPkse}cDIR`|Jo*`9BN1c^x!NZ_%)7C8 zuQDG>y)C}CXk4LXxzENH+X+GBR$+gPkh?3po zot&*ZdyzbD{-q~jmw!gNg1lKl&8r&9YNZDFr$v$Lw@SNgbR!bzb8LzL6ouVFihL@c zjeSN=Hrr}XWFNe6sgWV#Uy@5vRoXaZ=7b8oI>@l+;bLgYzGKI5B;SUw!~e%!g>UUW zWe~|$H>l$FTV;@=Gl9N{x?2>Vi?6_KX2w$|iREx-2aFIhp0}IbQ29+pb=oq!ne*^e zx7QQ(J)kAF1P`(yy1PB#KAw%P(L*>mleHMNwlWkd_XqG`y$9ws*DPQ80s96 zgl^CZ%a6pqhx#~;Q^&}u;^N4+@UTb;DP0+0R3!gn-tjJb562wGFnc+nCL1Adn|bvz zY7r$(uC9Dj)~f@w-1=nwPt3Rd^@&JR+i1A0JY;jaKbwc|E2Im%gq{3*b|oDoR-0$^ zcG_e-l@c@+1+i8?ioVsK@Rnwh&*717#a$MnQM*WVe7C1zl&O3b_8vLbEU7P6UnuEt z(1noO^1FT=A>H&`I*zUga>5JrZ405pz1w^Qw+d@)Mh>Av`=xNoHTfHe9yQgM>OQS3 z(v#ciqqTd=V<|RrHCWZ(GOLc)pSi+o_m%YPfyUvwVh)8;vulhtPi=$ruLt@LV<%CL z%jfQ&y=lSd(kbP3mikuAQ>a^>vxy$ZL*qnnWk#{|#aSPMQBnc*lfI5ff`@EAHHX-N zHPdYEnYo9_=NK7dCD@YMC(e$~9ec|4hFPqp`Ddp+`e93*`YSD?fUjU^j@-*APkNA8 z-2_>0H__c+PK{n;xCug9lv4(!fCEG%_ zRkm6}8!iWPmMm!n@jkr^?n-U2W^W^-{Uw+no$#E_(w)XNkoS5a{q8CAnd=~=VJs}< zMK+1ysaN32=f$k^1;m7Y`ai}Qvk&@@zo3q2hQ7*DbZrdc6PZX)VOFv{a<%5MYmhy6 zkUS0gQ2?pee_LM&0SQI@;Ny74+-3$MZTPqQp&po5#gV8M2fn8lN`dLnH8ufh?6^j1 zvN~IPt!Fp$fk$CSFRZ>b8+4s4@DZOOyEIBEtF!X54w1TYZ?&$%0He{KjegJbpaahyB6yWivRxPy{tf zk5HH8sA1+%wX#?&_%y4k_oBChzjgSN+|{T~c43BaclkjAFWkjCIz;^mf)Gn~B>Rx- z(WgCVeA9UJ06L4A;YML8Oh}x1*ce9MWxDY^_G_i>7qMDD!#zNjVF=#O&LB>GMnAHI z<$*8hEWEY1KuVshj8F^f6HJk)O_ye8Ag{L+e~J5twcwFhi%xr8(1&gyr%(ffiZcm# zgk~F2@CL?#P4&X8NPI?$%qC_L8)gsT%pQVHCquWBY)%9NvkSP^Ip7;TgDj+n<_5e+ zYMY(@KNYJHQ4gNM@^m7jFq_yJZ~6%K+-zeJap3g~y#MD3S)1l7Ts`WkaP zCEGT~Zp~gi$GvQK;?{av+Xhf0^=FZLzSInFdaU=LKRNP2Ijt|!Y3(CY6@O`&MhU`3 z#WQ>PTh2$(v*S`@yGLEH|4U~x&Pg{ybfBJ(_0{m753U!RX*DdUT;Z~M4s!Pdvo2AF z`bi&RFYxzmg&iJ8Guuse9kEsYBXT$p^nLZE1dL#h$ZUCrehXE=E$kb%AlsFx%KT)i zahv$Rh4Qx1*vWMg7(SCZMs7BCDtpBoAQx;B1E}(58dZoD)E4F&JD78D8gn1s%(qxK zRAXM?6G}5TC_j3wOZ0C_8>wz2SGZ1Cj9inGbhp)uY)da@Dzal)i_z%WRB5QB+?ZXe zfi?30wDQ;1X!Dys2|L`1%4oQF3n`Ym*{Fa-?!ZgLIz z9n_j9P~}vI)HKfXItGBgZhKZwW68-w3V1y69Z!2zIlZKq&lIDT{hYK5+255X8j@rgcrlO>Ze?*eQe+9b)+6E4Xen>Y!FzO>eiX1?W%}>QcH0B07+IeRpl?C5f0#_+2h%tQ1=+jV!l z=**~+?qT+o+ym;d`BrTy?F}ysIm0!?(#lP(26)Jc`afzJ(43|i`>Y$JN%!EqLT^V& zSAAD&$6+Lfl``w8qLc@ehri+C-Yb7m25L4WfwTbqiu}!`F!vHi=zqC-wj*}cKHomd zcARg(7R8)-T#c8brLJJBc$Jcx06tGCcnWf3ra284=|=JqH4C+iiNY38ByS79xNS^P zR8_7TMfJkk5il1Pt1J?c<|38W0#W!le210sm4iru9>MHjdvkNalC!g;>2YLRtB!FA zs}l-L=T^F@uY^1QrWplV>=3*;X($3pgMnqiJ^F~r$(pEWwg&qk2ef3L;ZLh<+yK35 z2VC3+@{PBE;5Hpp(9_VWRe=8xihrsUxPY_Z#zn>pl$#N=oiz^J*bEAudRn1U!DM_1 zr~Gh`ASzI~QIjjk)@3&{E9hs)cPVC-#0)(XM3;Zy*8L22>oN4|f2Z85$f(_dOphvX 
z@MZ%eT82LACYV2S%&SI2WN!?BkG?m^vD?%JAZV7BFNkU3yP+q+tiXdnuR!O368I6+ z!#%~b(hj8!s7E=m!u-vVp~;QRK*z_#?D^l7C|R;l5pUk&$xmaOJEw9LNKS7mpOIL( zta{a)2QE};{c!lYf4^^C@Ue8;EW>`c-F5GbU6`OG)Id6{;aou1)^bSEq9XMLmGhmN zOO8a!g!=|9|3!b!z~I2X;9RkpQJlTu{1sg-HWYQ;84>og8Pr_l{{ao($GWD*4NrV&vBbDht7h zF;O%Bpf=V^AoIsX^#B`tCmjPPZ#`-f#?V;0KZse&$*y=$=QHaXEkVb54BF~n+8up1 z-nSjeM@XJ-3bwm~irye(@WfH`CL3HX3?b2c-JwAjhw-r5-7IA+0p=9N)oMvTXsontnts({w3j72plrDl#S`0H_ zPAHB_!Zn;qt^?g=F$gGL9O+;<+>RsfD<{Tz*kD1J$-qh>78H--$lJx94ld|AASiz% zj*{)Eo8VGRr%ZAY8HN_K6v)qCsQh4LQXqm%p?8CQd=*;0BT)Z7#9Dk9*@ap{&&LW= zqEo0{V396It?MwV8AUk3WEW%GzR)cgrr2&dmc7SF%>c{uNpI*U%z8AH`M(D5GP zd=7&{_d7k{l+sgqB@Gb&48ICK3zWtk~#IE`Uu$v|LM1l3&d1z zQgp{$@kLjboK|#D{?R!q#ouv>{3Lc8wZLj%bkRy`2axdc%v!GEQ@p|Se={Ek+$za5 zbX@f`i+&Xq^i=V1o`|!$&>0NKKZ!kL7djJfwWUNUwMys@Z_{)kqfb`eVCRUSY%tnT zZZ4Zmv46Dh656qsL5ohHpCA#oy6qHyfJq{Y8BLVx;_UFs(D~qB!H1#gVr{jBIgh+Z z*{MTN+>wM%)TC>2n}p%EAHp%d1~-*yO3kxofYf35yd#-Y;}R>$h8=ti7_+Kh#Y?g;dv zoxn9T$a)}KUIKNb2fizaWmuz@0-3Fs-W=K7Bf#hHf*tWM%+e#x$xwgoMUKq^x&?EN z`M@k>uG1H&F=RccS8rnUc)^{ukfh~B3SCar+6ID%)d>u&mSC}MpqA6cnHJ1ruzpaz z0aYmxB$d{vm@db6eSS(OCG4TPLjTwZDo2hSfM+zC zs!p{9x$QJaJgcq$VZFKml!dOik{^k=)M-#NEczy0iLOue0O3JIj*A!hPUEQWR9m=i z7E_z?9p_*SjfUbh2@1PC&{p<_=JXme^m>~$jhUzb+}7)X=yM;u*o#J2qYm;edF1;r zhJ>|9QJhh77_tQoq~AUFuL$%EW(3nhY3MF6a=cOknJLHd zEU2cRT_3IIT2gdTi7`dW=AD|YRkX)`jK3=sgOce7o1f+|-+V?Lrl1t?T}VIs%a_*M zm#NfeA3CdeobLLLV!|43Az#_n(EiOn)RE2ckL`(Y7`qaVtfssT6wWN4RxAB@R+aF6 z`6QmFAIO?eB2ED7`!pChw=L3`rCya+NC`+b zUnwziw9-pkXRafk(qZNca}V>d01|is#$sFXi}*jerRWuOASRhXe76B=CnYGaRdQ$n z{gNp{k$eUjg-K8yszfR!G99@q+-#2GI)Z?=4Q`!^d{=lSRZkSL1k6MC+t%@L&@4_7apJg{kU==BW1al6o$rg&pD=sM#-?s?o~0k9z77{RZ}Vi^2Xm02&yN zT#;fRlAR+p&}>&xVK50PdKdKzjO;<++1fyJs|mVXmXQGdLw9haE`oht3ku~2NR-}& zOp-@%kJEH%nx!v;K>C%OgBALA5Y=m8Oy9BwTKB=HJ79DGd!{Uo)=4y=)?t>p%UY<# z7huZMBdBCh1{WevWFp+}23>$2LR|)pyB}(h`}Lwav{HIsynTkEAMzSKiJQ6$>#d3) zmMk_i@TBAX@xCpLhkUz*P)OZSn#udcC*jyo%it4KD)$AJ1R4Zt1>Ofb zp;}!AdEl!2LwTx>(&p=b5SO{a(T8))C`c9=QNWYCW>SIZ4vt>-QmzIbo4c&zAAUQ% z%PMJfP>)AW`KNeiWNMk|{#MEbYM8LrHc_a-#nIErJ#=&{-r^?_(s{NH-lQF7uF3&sM^eMWOO&?)^c%JtJsCn zLKc4>{h_&LN3E-}99`8@%1Cv!nnUde=E_ZZm6EP@)6W{UEef>H3d{qR=jZYf?jTzc zd-Vbsdy_E}kEH%0e_EMFlrG`f-LHOE`@zPv4|!rbI)QV^CfKRZ#oFdyY92kF>B(lX z99J1$uoGl5oCwvh8XaNqAVE&TSgowr(Z}cq_3QdO5I}mE8?3KHA`)bp;cfAr>W_8u zK5*ENfD=^-HKx(f2G<5RyR+c*fZ7A=rWWcO z>;Ml+Q$$B(aOmGaOyDKn7@GfMU~w=uG&3|N+&VHyd@KEg5BatlZ`>zd@vfMKIe!-T zSg>5ajXB#Tc8==jEbnd`OD8Ok-w^ZGHJ$HEjx@F^!y<9PmHuMB*sRI^_ENw|K!=GW z6Rl5JujMfZFgC}JsHJg9akHY9INi1!Y)g1&7MKI|Udj?M{|-QTyj-jhnHFs0+vEKs zOY|2IZAO@q*+pC%?kIZ+T>F(=DO=Fq((%@Q(w0k@#r?++ntcXKD#qjlRd+VNBICJ`Xl{_-htnVSShWiZ{tb( zY?U*4V-|>mI_}&pV<~7nr;+M*TOWqn>~Jd|2-1DQ3%!Vy!&Wc?tDraa37lVzhynAd zHF7W%>xX#_xkq!5{BcRIgcSNw<}Y&~JQXRR@Rfx-Cm&WB8}KQmvG?o)CBzTR)_swZ zy$3D~3E@RaaMtZLJA(xkMul%8_-yTsSy=fWH|N1~5RLWyVUTR=f)bccBqCEmK(a=6 zB$b6xzi9)abX(~4i=)zg3CZ;z!4b=kqyr~vc0bIwAoHz3C3+ER&;!89{~P<_RiIU! zz*D~f$DfJ)%XoZ!Yd8&p*nPh-3uFH1jM~f}xDuDK_eh6ZxeLfX%|M^phlJvCAY@L! 
zE+?M3jI4w6j0-i&*`TK?=xOc;C2N2cYjy$kZXL)+#X&%7!Y3$%X zn{hb8Eg)V&}+<(3xPPAQQY9=o?7&M+ZIx(t_pD z>FO=5lJ_cvdI*8f$JyPU9@#JEk@F5p5tAP#oQx{tejU{#p+dH`Nf+abMQ^hkbbsq# zt*^W&QZZaDSj4~Ie=qEl2jB@DtNy95QfYCyQijOO=X2GI&J*+8v(!1=c9?q&wrYqw z&(&f6B9~fGMo3v8X{g1W2;B2kMYnWqu!HDTUTb6YZD8GP(=Qvb<-eYJCpi*rqL z46@DPi*SwE#mpFbAN+0Un2S$n!_+5oMd?5!A?yq04&_7jys>u3XaSE)XLPi~#02UR zlfq{T_iTG@FNJ6P8qUccqsLGo%;_f9#G1Mt>JGPhP)V0#<>ROcv{!~`FOBzB9H_Cq zL8R}gEr#1%h1%j8IGK77|G^QviF{2GlpCB+q>my;YcBYzFY(qnie1k)B<|+7 z9w7s*40hMAL5r=0XU{S_!=L;0KZ62cANsmXM(jdVP; z9b|4=hPFi=ql{9#>T}&=9Ma~3bUH$pk-;?->scA;4c)XqjB~_2x(WON4NXN`q5h*N z%51F@_96xJfZQswKbRaS?Elku$9LM_E#QQzrwex9QQ=7VNMyM9Oe&|QniKegF)wpu zq@?8Dl0757K=gOl5>JVQK#u#l$L4yR?Pkm&`yS>IVHj!JQ0=Q)R=yYZghxhhM2dx% z1QUbh1O5GR!8(#`^kll();R{+ck=65iK)d_;+r`H&p~KWYuWx_8pjCQ>f!4($mRfj4A>R8${G&S&TFhxxbM7j{0Yu_O4k_{`-{TRyTC=hrg5 zs9D5GGaruSpjuDKAs>()q4(WLIw&?0M?uf%v!p zg8vgYh@}}+^}uRQ0V)2K5o>hT_aiATS@Wv@LuKwdR1`UpJW|cfvdUp~-UKWBX~+tg zPfuX_v$?qOTn?@i+nyPV?5Gr~4A~Z4#C7P;bvHg?r%?~dZ_Tx5*u&}SGHtfL$G8e7 zR4H&oUt(ul6!pY~=vOvl3Nc@h4)6!4!GBR{7_}qdTFro0i89Y(q|`>*)Ns9!o=NTm*DFxlDhBn;dhBz{ z!}F>`zw!kRx;@}i_rNnX9iFz$*m+%mr}7tkVl#ifTy4KycOq&DS#WfnfUmBg^$gX_`zB?1(S5yPp2BmzA6)hY<{)znc#1cXXPRXl zB+D^Jp`zfqUAWfGsb^#h>JgM<0nWxRVZWgQ){WGVp3o6_PuZ-eW(njUEW)ZPm+scO zA}MF7c1GWBEHl@_wbu*T-WM>3lt5RjHr#v<;R&3L&U~i2OL;9P$=Rea=oIXWtO{=r zeF?4!vcaX;iNpu%z*Vz8(qCLDy_9FEh4dxXpInuwQAy{rmrU}<#l>btb@OD878ADT zn47C@j#Y_Mqpvxt2ySjW{X6-o2C>B;lvd(M(Hkin+UGCf|ADnzMyQ2!N&Oo;?ff8! zwt`o(2c4a(Eu6Hsb`?QZ!vAa!xvyY)6~emPK&IqG>%CDxi<0eP`N-Dr#n4}&iQy#i zmQ+Y-sut8ZXA2(iOjoaoFXoCmRMVNI=6uXhFj2+8*ZVrBwQ$4pc1WUfqZ7W)sEyUc6x78gA|YosIO-R$BHD$7 ztWvaqT4QOYZJpB-Fvpy7u{L2B@t+3=hr2g39 z5T1>!JBM`xq}#e?A+r|VVh8a_6f*Aqz>|0g-rs6)2~UB``3Lf=T9Bi_EbNO)#4Wfs z8)40b_X%>uH-j+072eOyaF>3BTXzj;qXDD`q~jX*z}a-C#-fg`;r-bgnM3K=eZ4}$ zMO!$Yoggz@Hdmk`+#HD&8Axk1k&`(V*&e%f4SkV0=zhI2o*IXYJ$OUzF-~KRJOg!r z8+rkFX#T<;Jq>AaeUY^sBSlLl64#51OCx+_KQ!gFpmUCu+_F!ujrBpQRZjR2ZA%;< z7w_rmY~f1sc%$beOib>T)0^BPX{D(V{~!OIuP3AkKe+nbeBNf$_?ygL@=w&j zF2Q|O0Lsof%5C|L)D{WmPO&!h3iG9oa(A_pzScN|-or;Q2(M6wk(aTC&CL%L`Uvg# zE*!(HWM`poG?IEs{(^sfo|(t^fs}!<;KY8EFUeJvL&{!cUEfeTsSYGiaE9GX2C-ld zQh+&nFI2rzYymbO+lU>?wqgCKt1dyMlR`c5(r;%o-2PRqQ^*n=1ZCk}eXc$eq;DrG zM33;COvhSaDZJm8k?&gqbs*Rkv5zcKi&0yyk8!AP1Ak)oE@tERl9ao?Y-Gqp*4OBoH?w*eHmIkKbiHe zZuaYV!AUl?@bG`cju=zW+`l_lSl`>5b=ad>>AH;^fWkx&j&Qo)7*>)GqMSF_MWC)y zhgm@)rQLIuv#!1ieV&GmRhj(=(OC8jD{2Os!m3ZJ_h4;Y42gB z3A2t1SYrTr=K^w=ZfP2(^%;f7J?|qp;Jl0m@ll_R=JK#&`=@exLk%UC+AD%g4 zq;kfwH<%M}*zJ5YIe=$a^|CB)Kf8sO$m`^9&z{!>Wf%0oxnMKwgg=bkmZF~x&xlLP zUm><&M3wMKQ8975;^*aik-tFVwZw~whw{&loe{Y*{AQTfSJ(3~GtslI?M@nr(G_xI z@tQ{G%+5ZSy)n0&zi(c}oTJ&l}OZHYZf~~TlnspxVF18U31x4tFiu`hDXK8Vz2p%_Fc}SI4hQd`5ku0NlvJI zhf|KG6A|9UIa_mB^S%&Epc^a9@2U&6WX{YirZ2>DUdisr1JGI@5F1NYk6}5laERui zi$`*r(LY2**I;*aM9b$VTJ;`#nHM3lb&uU9wkj#iRXf7(bWYG}4zDk=clLC2`a$wt z|HZa{64GZ5*$YM4BccU1bpSu&aiUS>?6aI5JOin!hR^*NzQ$gR6=TtJH`!hIXZA*@ zjmPIA8uC8zw}V6n+8|4lm@&x7f-gA9dkRrs70=zj*aB}ddh8Cb_toz&Jnj*r+$ZMT61MBCp`VY&O7rt&F5oI-<_^?)stLZ+@G;p+oR1@4`@4> z``-t-k?e54kri}>T&>l;>{+vrcwu`rnYHfyv7dtWYh=t7H#pVUI%xGnC(q=hzv`wB z)P8O3HkO%t7<1~{z1U^-9dkKjd>?XZp5kp-%kJP$jCMrQn*_@Ry9YlE#s=jy+v0%| z0WENw^Z)ky=aJh|FxWfLoBuyGs+g_K4>=q10P*(sIDaC~-l}vbAI=^1O-w>etEg3x zqoQxdHjTRxyDP3p{&y31BsNX>Dt>;<*{Fw+SrHq<-w$i+yXyJP6{9tG$o}wm%++&y z=JwCsopUDJojomUP*&xvqFD#Bm=WaFB)+;8zt$!qEEAQW`YPvNMZ4>Ie)Tr-J@zj2 zv~{yiiM+q6`d#*(-^DKWQ=9~Har+sEg0soV@5?&-UyY`mKEEwEIXE$x6Fg&ht!kW* zb&3qOTy~_WfaZ9qj8aSDX{n(%=VY%++K0sOXQ5$s!EYBU-QQ%7heLKQc0??D>hB5V zP$l9Y-(bCOy}};Ui-`c0!Wym382t@4K}$05#xkNdg|87>87)`+n=?o+FeqZ 
zJn|Mh{v6}8Mgwfu=$2?M#n-<6%AthE$}~LXZ|bXJEsP#8D-4JW)03c-)nt=C+J%; z?S5pRkKfsM$RX%5Lk8@ZiE7%L#tv?_F-JH{jM(6{Hg#P52?m6!H z&^yUH*Sps1^{(~Uo<-hte1R9e3EuXs_L#*uJ)8Aji?qg^Snz_~E2=r4lk0d9yQLgw zGbdOn$k9{I)SYGCGF!m+y3BbJ?0C4<7@u#lf_9U7n zlhaz}F)qEws5Y0Kbw;wk#xO>yHJsOQlAbqWACLl!q}5~}8BnFLQ_~strSF1QumG`l zkMaoHQenT2mLU9lcCacQ+Bu~qCqBet;jU%>o;modFdSaLhB8D-RJG~%0P7r7Dqcy>`hS%`R$q3dDeRmwyW@*V zwN5!L86(yy#o538XXTjP-zv>sJ+?ImIux+7>@nI*=QKGrbJU5>1k>WImC5EAyRrV3 zQ_3pC2}pzOVdfI{NSop;qwRlcC$J2DmU-TU%>?!3m$Qs2!{+ z%1zhj&T;d)p5*=woQ)wOuo)ZgM{kz)mDx$Dsh={xwWfG>Skuj&+GTfXP#Vc9Vc|KTQIi;3VDm%6|OMg}Q*gT_MR9;$ZlzQ$vYGdrAXl;l4 zU|^WtQIAq5qj8Tqi}aCBlJSpLP5(Er#OkVcvzJ;9`_Wu9&T`J;4yThf(;ltHE8ZZZ zyc)|~V4$|z>TVX-O51Z-clM{c-e}{r)_OQCg43OJeS)&l+GgF;E83^*pmxogY29<* zGOGkjt3h`Sdy3V|)x{p6Hc*Z#mC1)JqudD&CdNLC)dzpL-&X1vo74~8SIqg&BYnNH z*!oNz$V!WjPNJ)c*1%4(yG686j%OG0E%P1m?+7MEG_^}w*WEp}j)4N?ew7P8;pt{Z zStG)oA0PPy;sNcvruH)PBkh#$K08|PR|_fiwZ6(Yv$8A6k=4+zkeN6n__edtvr`#p zUBUCX%HHP0=!upq*w+0dyz%p&t+2?M#uNX)$_Kt4fgEGM-ouz+UvSmu1d$kDb7yqk z@7g|B2eZEtbk9}BTZ66FIG22z@}_y){i){Ii}l@F17$0F+E=wA+&k3A#x~8?R~VK) zjdLIT>QsFyr(tTIsdl-bOFe6xF+WfeoPz8Z{E==2XDFBTa%#K4dZG|V{X3jj!m6`I zvY@A}aw_nptDNVQ|AJ8?9(jQ1&nUZ+R)F~P*DCe`Qu}pqr+r#$YQ=k6 zYISn|b+7TB3)CV@q?h)IHB9Gx3UjQoTw5RXIW2uH?LJ1B{e^3|5zZ;>#myXhh8`81 zVx_xUlhHAV^>i1MEyh)+kzNZAaXsD3-h)xB0scyvd(*{+7p7wu79nG+M8BTDX%>UcJ{V$e8y2N-L=_RtJDn3 zGn)jI@Ohqbto2Xvs_I_9-{VJ) zS!l$nxt>1e+kw$htsGzOYV|wcROhptG;sCPCDvynd5I_jV1 znrg+et5S|~ocKmpbs{S^-|=m*<_A;U;ofoP8l!5s*Blp|=BuGAS?#qqBKP}i`dhe4 zxJzJ5^ivm^BRH#hhxMUS#aV^cE#Q>1Kf?=KTIr+pHcoil+vN~x%!{t3YgVdIo9 z{eSyPdrt=6wYPd-S3J4R^>e-}Idz=4@cvf4+8>Z%deEqKkEuh@(@ zee7wTVZ_Y_dQ!D!d7af}zFpSD%yzNU)fQP%VQaJ_SqFkmqF26L>hBP_#EH)v=kojF zvW6H*QHzZ{{{k$syxa?Fif3G&rjB?2c%MIQ$lh|9R zdT@Z_@=VBCqfGZYfxG6f-pf|Y;1m76H^yimP$RCoXJ+?ipO$|5v%ptY>#*fYWFSIa zsCV*bksp{~6jftABk*f4vbNgU-tzWa{(;_?dJ!YR>7gxBDj64Cxytmw9Ib&m!}v^1 zaLdA~G7%%GW3sx(UBx&+{{MGMrnSU0TsN%k&J<4Cy+o{LH}jt@_Ij<8byEqmmN<$l zoVAg+T&tBq!C{6P4d10JoDcXDu(v&B_TUyw6Yx#7FPxzr0IMrt4A)zSuegZA>E&s7i&`T>@- z&p6^5rM&Q;;pEv=eXc*rw!D{|@_8<;nQw&|6P%)5aDQQrFed34*mXYFOQn$!r?=E& zwV#c9_GE9>;B;fI=O?{upo#OPr<(o9)L6-pLyki@SX}9SYQGt*s_$@?sGpj)UEH(T z_{dtvj?z`E(N4J9)ER4E()?yvu&y^!=@hKxtOz^lEYIHWIpD5tY5sGb2Ks}XI<7wM zD8K5w6LvSZhxsy0H-`tm(FbW|tOe$CPK*k(+F6_2E9^_ghn~ImDgRgQ&m-n!7q*n> z6m`{$ajs6WT2|S-ol#5ldYK)y&AzHHgxLR$lx3}WV zy{@j>^8jl~-Ccw8tS#XKtwMnv?pE&3IZw>{F?;iV^#2up&Gkoaea-TIYv)<_-T#n- zJ=opSbivijYHz2ed5hh4S~=&vz>v!Fo;ueXcbwcwB3u4mI-Gll3FcMq|2aY1i3}Dc*aSQLu)(TyNvw9q1RaT}jWH zt>1As@ZYye>O+W-atgA#!Aj5;xHW5vzomPeZ$ZvDb5G=<;9}!bPog{Bh<85lO|{=L zE4eS|b%S4+4}Aw*qcbnZZ_B=59V-?NjG}cj^-`c=%T=}%E0Zw>PKQ|kb5t$X7t;}%e!^b?r zJ=3^iO>qqkoV1s@|1vr&YrV&UU96_A4bFW-RZHmuoQq1V`iz}3-p3y{&-R)3-M^~$ zjY(>h-k%eV{KWE->=$Nx?+kl}@vi=n(%70{Uv}*_N806F@0g42HLl+|!F->U&FTzd zp>_?ugL*sI&wI%J(pc(BRm4l3>ghzrU@b=eHr8B!kMP&6M|ln0mRiDTXf9JD^fH_g zncua}*=lC$2g!pNg{SYX@jiPAr&ztR4A~ljk-B_t_3n-JP5&+LLVUXJ(@M z*trmFqZ!&5a~N4n{g}I#QQY=Zt$~wc9(0#*hM50ojv7Zq>pQ(U=k+Yns;O1Aoml5@ zYxV6l?01^X`I!auR?d6&S*@+>ruBh2(znh$PHgWR_D~*T?bp`^cOkRysO8Kd${kOX z^(e5+bzduw#W%~7ZC40R(f`ywvI-L$dtg-|(zR9DWRG?FXm#|?{%^?ixMWT<39;oN>U)->wzM3(s^SX{SCjSI%y~Pj zJ>kE{%5{C_oH3Uv-)UW~IrcF82R?f@vx3c*X0Om%t1GQ0%1C!9YaS;6%;6lq?YWg) zGxY@fx;c(~UWNZJGh^AWdX2)$BiBd4Ug|sACZ~@5lk2?0xg6SW*o&*l1Wfj%SS6he zdULgr^|yJ|{mj{7v~w!x8D^5LX#3FELDrbfQS+=BN}~2Tv89^YcD0Fd&b63C-;V6} zx7W(CduWr0-YnC8BKq*i8DofbFwv;isDshwD)-+{{_P%|>nx(CA zO4;4$#X+Y9Gm2E*P!5@G_3KWWy@A+L2W5~wUES^^Sd+;ct!7w6nhKL!f8Ofsyze=0 zY&XC6aIT6u-@Sxc;R{z8=Syq3YASuL=jylQ7VJ?TS@o6LYHw>gbMUF;tL-;FQ*P=* z?4xG1D~cUB_o~J0)$B-C-AZ&`(;q4ERs-8quj^%lx11w-y6MNWILx_<-@1~b*ejJ| 
zdbqvF+Mw6gJ^%wv)Mxnmx^pVlWA!&{kTpoFt_-zLbJoTn%Vq|*#8_f4WZmXZW`KO3 zNc;g?@uKI)KXTN$th8rW&w`G@swq`z>S{}tc?{$GT|2?*q1DDGr6R|%oLALHd13x# zf2eF>-*3gNp-)k5qrUo7-_P8+nBGx2W_?UPLr1%px>(6ls;kTFk$SZHzEhOhrpd~^ z!fG39fMco`jmFFv6V0JYsy4{kZwz+Q)b{oz^E#)P-m||`n&{J%#r6<(ABe(#*vz$* z^Vui5yDAlo_w)~ydBJ%0fZl3->HKUTQzj92V?}{8!1_Y@QL98g=@Ip%^Qn1}*h?aN zWB;rD!v4_xv=>$zy93dxhIUEifjWuk(>DM2upSbKCQ)elE5WAc~4%HAM8-J16_cxj5KF87ibzU`= zYkBZ*h_gojnz-I`cdj+qSw`fjuRTmTpyoK~_Doq_?I=#XHk=$lLuqFZ(Yk1(?8;V8 zY{O~RIp?A_k2RQ`v;<`h9uAM5tjxBnY01u6tD3SLu5Gcas;?0RH`qVdP`_lP7^hWZ z@9%=hT94JX#H_w2m;4j!hGMJpk&}|@erJ}YX$PEn#wSGm_F3oc z=~}9jXXYpFF)ou{Ig6ThV%HMKOC3Eh=a>bw;|r!aqDk@5}B*9hC)~ zU$@zGDNFR$b{plix>I>+?^idH(Q*lE>Q6i>x1e-6vNu-|gWZ9*CmZj_4(kFLLL=DO z{hs;=@03Dr^ED^Q3hG@Im-UIhO8bF*MvJ-DE2FGt&Jyjh($g#k`XyQv$&STWId`sy z@(LL?hh(iG5x1xKs`e70S*rC{T01{!>?{t3%P9jHsn$59S&2B5y`k5#Z)}P>RY_$} z(g*la>#!I3G5lK%^yHtj9i zUy5$Ufy?*JP4;*CQ2Uk@uf9-kIeo0doUZF*U#?R6U^UsU;AFA_Zkm~oVoU1tcU5&Q!5fpG@d-{W6Cb8w&T`&Fw?H2O;_(&lblBCJ4DvLQv+mO z{GqTm-swgiV;FtPIDK6s8Q-fb!?X=Z=SJ*W+fG;RI7RJ;j0#ywJ}XfJHj7+Wwqe?l&%0l1M_Ac|Z(@Yy}N1Jn$x_WcFr}=}Dp-yn>fznRu zPpq9^&(2b<$Z=mw^r;=*&}hzO98Nxc7Bjy`P6M@&bHXa8zNw`sx-&+rr;la#>8nKc zrJUZJQ_x85rj=Gy=NJ*He(c|{RO_lv!FR{muVht}Q$~_q*^B&r2mZP6;HI)W{zZ0f zw#kRLLFeye;@2mF(4W2Nv&p~g!D{Lb>leL~JJ!5$Qy3bRoRbmBI0S?$8*c9+M69u9Cs|3!J{d z%Ap}jq}mZ_Zm+zKmt_HanD;*HdLzPj39@pBY%^L__ZY>(2R)rLQ1>u8sUnL6eVP3AXcXvALL8G}hfwLJZL)Vtn$f*>3HlT^@S$_~5ZNvwAg0_$46i|(p zC2(g4S~d`>RD<>%X=`QbZOZ3PM6VLC9oQLxl2efXG_bG=FGo}AJV=B;i3n2!b;d9T znDkU7?q7x{)jp2gL;Y>=avIdnAU|vYSu(?kYp~0l(~3yTK2}R{Vkqkv`_rQ>e40b= z{wA6>AIxMx`={jQEG4S(J@M8}#94-bZy!{=4mKm9P8_IfPaZ*E@*oK;AV&0hIuc;XPWe?f^a;J_7HG@spcp72@a{>Mn*Y9b&5K+_%Cx|H!|9_tY% zQS(^-T9chNht~lxv>v({q1`zeP)7x_6uvR-`3)Uik zh2EIWs}=leOJ0P{X$_Cyr5A)hqP|4dCuDNMLL}`zO3uIRrGEk&`~oOmMZ0HF?`GPx zgPOOn7yNcw_c!>-=K2A&OEeN!9i31Oj{nLUhabqv_=en}F6^TpL#YMG!QhN7PF1)= zA0xR_PBEy<>i>smrCro1wn-WiuX9a>&xObX$pocQyx-$`F7Htc{g=6KKYTsL6G3=$ z5M7nWSa};HRYpdtVxb6U8i1M{`j?4NwA$qyW>D-g;0)24Y)P{yu3ks zD}bDiXs(j%Ve=N6A|HF^ORe{5^-}n_7#^L52Irtr5Wby-61$+lQMkJS&SXQ?`{>;o z)LMu(G$PBSLx={oD7y&#%7lkY$Wh!(FBUj6;q)@rUr!}g`zQDuM{dg;G(~S#f^-85 z1Ni+k*!iAMbK$sXvOHebk)mjNdjsxVgEq0$7Eiy*fWH!Ocr-e+ClR?Ryw?Js)u4WD zT2qTQrE)@qg)Y#b&kIgJctS?P3(8GL{zUd|v_L8|+)A8rQJT`alG|Mp2`A`UBx zBr_)*DhBE4UHG|}mD6W=U8F}Da7;LKfHEI5&eWpj4n!U&vVvheRz+_{VfnuoG#`e{ zyas2wFrrlB|K_x>DebF5UGcQ)2PpE8-Y-VuaNY~l4G>Sf#Fb2><|X}4qt-`AlISfz zH1Lo&)EZ94qd!XUa`C-98a^L7rZaW+WDF?Jxi>eV-&1&X84Z>}bW`NCJiV>XozH1^ z76>(xJ}g0N z>_T^l-du+C9Hl2`xaT@8zeVoADZU*E>4b-HAc>k1(Zu(7onv&o4pomKiznc+pJxM{ z?V-V=m$dyV@28Q&W9Y~{IFSke{IsDHR4xrCDI{bN73gDm-lIWnP5zQ8%@3^)(!Q(UY%QbJpJbiP0gF3maRwS-DYAEt z9?j?4Z{T+z`fDWGbvC2dAM{$#ycx+^2_HAnmvxl!FSHkpp90q#fRWm0_92W|?ZLuu zwE8>5roUnhU=R5928bR^yC6G!-UvNRO4|%Ymr~yXYTgPZmZ7JlmJ?h*h)l{!M7QBz zJl4ZKI8+mPj;4pL>FL|hq6Qdyo6;MDxDFt^Id!)I@u^@u6C|~!O$8X2ic<3%=z}_F zlW0&?8XZ!I`}-hU^(nJ7G4+pl_2bH5YN>+mk&Lq-$PLG8@PnNg#vwTcYALVd@O%xl z5KR0D&CY{^f7ypm&X7JqUq+!{&Oz}f{36m^h!Tn-eI_{5(H}YBF93emao=jFaU2c( z0yG{5MK-cr80mb%_i*kNX|@@`T+q1y^b;&!;ptjXye{Y{2F_%>x4?5TBKrAxLSZC+ zN*$$;gokK2`KNNPg3Yc|N(?l~Pb*%Atg0)ZVn1W$Hn4X*gz?RA;}F+4a|L~{j&`Mi zn|(;_RX#oBl}C$2qovWx`}}#KYb7XI7G2N>32g)_-vkwHp=o#cQWK5d0u0oJV`6Ef zLfvF=okUB=u&${SJ#7UitHRlj;ikrj)|x9t;6n}WNuuq0kjHFra2^@T4pBIoyDY2? 
z(T8oILj+|}~NeA^|;Gz|6tri+x$}o0{wm1rp zH$jU{@Z>x_d_+%QLh-9;$@^e244>gic#sVzN`any^gNGWF7xCo)O?iM{y<6|!q;~h z5msR<$|$iF8)X*Qn1gM&g?4R4dq<&*l9ABr^s*%Fx40)A-Jvm>tLWx9u;&2@S-jlv zP3j6@+joUZ?db74*miXo;RZs(&$-rzQ%$-;Jsk{W(Y_2cljyLk)Rjh?vuO3J;P@yS zsSqQlaH}4YQ4<<`fre>K8&aV6EA%oCjFATbCZdrK!O;saD0Zb#_9Ue)rVl63T8HRs zVUQOQLg#Ie5&-SRdBT8l8Q6T4K&cDrK1b^F(bEc`FOJ^c;rBVF^QuTo+k>Tdu|)@Q2WR7Nvlq44K3%FICE5+cB2F-lPZaBXcFEwk_ z;im3*_>)5^w8_~?)FkAQ$d0YUN=0b-fXtO&Y zx-^)U(a}r&KEBliJ+JWTF>)UX*V4IDa#L5M3$me21{B=Rwf#^z15B<$5-x^n5*>3L z{hh^3jI#xs%* zB$sO>bH6%BXAO`U1)_`4ucz>d^N(mcN?J{M`Xe7A3L0?bO zZ#Vkbfw~9z{WN{cg`W@UeHt?FK}MRujbzYR5Z>fN@`=EsZ3U_2&^aa0Gco8Wu>@~| z^=zngnev}7t^_GvM#&gn!pT?Yo9HpoT?zEE2G^v&UT7BuHF9_=jo;6K^p$Yt3@uEj z4Z{6gFeCG-y4Xhbq3)-QEni}j$n3HS^r;ZCB;H_TY74I3fz}FyT z%3Xp?UWFneWnsvKmtIBi*+nbE;7d{D&j&w>P-Yo)XnlJ5T4;oBk4^a+SF2EBIAfD! znC5{J6P+wp_!Cfg8wy_sBWLMHI&=`a--OmLkq$rC55w=Gw73uUOlc^Pgr>P0qW4{- zc^R$SP0v>|s>o|Tv(1UvAX8}fMtHjsZLu7#E(md2O1?`iCR}>}xBR?c<=RDRKf*66 zt&D-X*--B#G7trNL@PZD>9Gj@E8AlHP?eU%ohvHx?*lA*k^Ayob%RkA-01iX{9rhb-mM3T8h)Bx*ipSf za{!Nx%$J`~ww$at4%wDj_)gxXWrDtrXzhlyuQHmw5?G6aZnY>~;W;;astgK#MNhTl z>YKb;qs6-LxgdzEKr3y=vjU7?(uZ60;354AM_O&@^pf7CgSNBqM`lMh{Pgo{EG@G^ zw9LVdqjMgjH!ne3nO};}ARPpYz2yb5(e(CJr24fGFH=EJ8R$_Ote1e=x#(FRG| zJ*N$6w8_mcJ}7L%LkAt4Lfrw@$wo6Kr9m5!2hp3ApkgDj zhLZI_bbB!PHESBDAQKbti2aW4mki&IP`4rNsn4f(`7Al2I;Gr(j%>*v`irA&YN%iC z5S2wHBfx(W{W}}79TuX~$DtQE{}wvzMgLD^9rzR4Kh6m9V;<+-r73}`3gtMJoL z-!q~2aX9yc_WF@k8EwTUz`1r{TE^A7*d=XfQxz~;6MTx-?*Vm1hot#1_I;DmTY3&2~^+~=XrO<` zkD*Q^BVI}LUJ{5X0(y&ahiIh|;Hw5@MnmTSwLbtCGM5rcoJR^wW^)z6OCJ45;guiE zMZwjA{AF`jHucHwSthmL0Z%UQbryR5!+iQ@=sXv_xG+TFE8Lp_f3NXG9IY}LRXmg^ zvYJ9)M1B%NElj0F#rRwd`Ib3bIVd33WPUVL9Q5}v@Aj7zClm@ zpkyO6$=%dg41L-T&XuI6uR_^U^!in3Q!!-eCD8kP=wq>e9B>u@aWaDX;cXUuc}ZW^ zqM81|@>&K`PCy$!*WJjZ4{h?FhfciEf~PM0Dl9}Mf7$Knr0_-*adB$ zLf7q#kK>@vc4{~a?stc*>5bI20^N6<9^9g)80b-eR;PsMA*ZNDhQ?|ZxPUmZ&*j`= zsrx>)aKbY$Z%8ixn*;ceN6}q{cruU0#DwS|+!Cu# zW@2$*L-d1;=~2-8DOAZ|M9Sg&3ut%){K&hYWEGOS9{oQpG>hE;wH89Zjp&WjJnsSH z7V;t(J&0|T#F3Qdv-6@qXnf$J%hRvda1q@)tyF&XLc2!d~E%&#e1PO zMABUJE|I>RK+8O$eRtsZosb525#P2Sipno3@UR+nTt}q|Lfv<;qDC>o4~NFTFpC=m-o)cO1mApjYH5l^{TkScheG8T zpHiS|Q7~8qDHq$qV7&0*dB0B|(vU)#KRgFHyOHc!^lvG+x()uW)6c`S@G9S)(Mr)J zWD@X{%#k&y=i^lb-X$SLCFq^3bMc^=a^P$fYd2O>`aMu}jwf=^m!)Zic*`o&;yzHM zIaCr!5D9FDjKuQt^TZRV6pmIZiIx=$Ba%-==uaU^&p^_3dL~gciA0EIz5q4%(*C1x zd@rxnpj)J64OH7r-7<@pK6q(mX*9O@r*e@t(Hh~PC=wj}=bKWIlN_$P(HUZK7Xpt- zwE0y~C=s%OSOMQMgPsI3tMHr5x!)v8=y+OT_<@mo$_Duei6FggtPHz;g|4D=8_Vn6idN{G>X>F;a-`2x+y&l>WU9A ziK|a|?mF!~LQUJi#qZ2zmNDO+jm|hkSwbc8H=X4kY4##!FQUn8E5|1?SvD%^sOf~gbG_7r+^BXmE?wOi0wr#!KY#0DIU71$Qp zZ$Tf#?^KF%3(%TU^x4PQ?ng)K@ZmDl6B}H7pBYG*1y65t^&D3+_$Jts5jP$^dk6Vj zgok7*XkG-0wqvuM=Oyuqv$SFbS5DA#8BMcjjqp{*oBurVLeCy(g#pZ=8q$(NT$h*l z#AL=FLBAi<*KD|Uk=GMS34kjp^%Pp+2;;7d&*F7HOg&e4T}L{_dXgD`B`7G|mAteL z^nWaxs8vXNH=*7J)TcAzOEf8n#t|)4553rmzZxJQ3hBznr%LoRmsZ?DPUX&6kSH^n z65vq$dBtg|Nb9vw8;{Wz8T({bDL##7JavIFP)0(rWMy6?dVdcsy#s~QpvxH~Ba5Bl zbU5=WXi8?}(U@68pgS_@rJHf7I2`W*%6N~dM(p-@pzoI!SpoRJ?yMIkqKT5U zKq&q`*7*DU(hfZ$QPvU6R7Qqo%$1>EdCIKF|KiQk!M^x1WG%DIw`In;3wfTysB;=5 zn@}Ph9Nyu226W8QctR<&1#C$h#=ZYS?z{Z;?t~3fVS%?m~>e z9~SN}$mD2V-!op$K@aajhQ#8&&i{|mJ`S?{63Tf%Uq0F;e$0DlW|#rHo3hl4@vui*RuE&Yj6;}oUZNPID9>_J0? 
zhiv!G==T{C_!h>jMY ziu`l&S*4#vu%`C1j|*9D#MI%t&{N4OoerT5r zm0N+Qn#h{?am6nu`#)#D0D~uT--gkp43sMvvZ*9$FFxdAV5BC0;y+XHK0IgcS(ILiC0daZ z8q#+^=vfC!mw?w_8AH2}A6Jg`$<3TvP7x=Z3}K&Bz^U)FcitjX>2tLC&&<%rV^^#W z`E8%U+i=hjhIVcRr^w>N@~#7KOTxc)(771=6icQK)UCjKY3|NOqEAD+U0~!vXk0%8 zEjA2t5%A0>x7+)^2G~Sdmv#=#W59-bRyu3`hGjs(u75x>9Qk zbX6B#9ifh_K>r*$ev2_>EcVlHp)sWkS4R`2kn8nnlSF>H@#?}8Z-Vb|N^b}jZ15|2 z9Wo1k4)>p<%_N2`bQWZa?Isx)g4_gn@CsV9Ea;Wo6w!dip-Uk!DRaR}P+M@C%g7=z z*{k$;U1+p83?(F1xRd`SH&MKYPe6i1)=qHkBv2eKjzl1aK;4Kd<9mYmEK)YW;%`ir_9O&wSmbWNdJhTbmAR;uDc4V&6 z7tP+C_DcLkJRbE!Jd}usA35I-+E<_-#M;S$5+#wCSgfQ1P)*`GDqJoF&SLmWf!DcM zT+iV17Vsk32dC)Mb~KPgp?1=)r6H?%Kcm@3Y}CbQ%IRp*nP{g8jFlhpc{iF;D4A^%u0=!9AI9Rd!mC*z_H~-vGTr&0Jb=4~mIjFAI#Mg0{xcRxEbO z@<>ENlKCbxCliDhfeY>FMP2A2zMd}7sw#IDf;M9RicdNLy<8TGz0Nn8?TDXQ^1AMY z`nnBtNSt4yU-xOrA->CaJ|B$C1UEPNew1(Xp`*ln4>FpI7f9k5VhOvDJ@F|&<2{JB z6HQkDsVKm8nNK>DD|^*sgM$CGVFRS+U3lIQ+7)AjD$O%RL-y8F^pbdvpHS~E>RW@w za0DGLTI(oOJqUVdF;XrC!`pcshgt#VtD@fws3ChlNt{{6y|TPZELJjMp2I`&ZZ|@g zcZKWX7ios(>PI`e@JwTV=}4@oKUW6hyBLXH>kU;Ud(MOGDzr{=<^&b-VEG|!%wl{k ziFF(UB_*y}l{)kBxgh*W=3A4HEJ#GDG2czvaGw5b0~M#B$i9#sz6o8fX_+#$Y?^P%zl3iy!O>qU_Gl)qbWeh`B6DW9r3%=guW6_iiUc{p|;Es>O+U7aOgECUlZBy zh|U`T61s(ErX9eocrYa=t2dw8qHiT*Q>dN>Jx)RK$LLZEY}^2A;ssKnw-;FykG4+x z#ZM;lhvJMB>GV*nBWZzT3_3jboWDmQsS^v$fNqs(rDSK6MUvZshyLiEO0-TqRW<01 zc>hw6AMt;lMV}o5f#<=L*nQ`q$#Y(kg&@AZoyfJ=B9aer9n|Mzgt!FV|BHb?p+@n} zOAd>SM3T+17aWV1bPIAQ`E);_g%|U@!KHH(gX4FR_01EbVNA)m)N6tY$OgVkq(V|C0ju-Vxe=6!LzgcD&KwFE84v}-06y@ z9Sr`(`_lxDbw&?2fhPGV;Tq5E20;>^k*uV}^jYQ*7eT>t><5Vx1*z>ZW0vSl@ny?j zV#s!AOH1n^2{yk=R8G7c^`LS!YLL`_s{uTjB8oY>;T$EBF=P*Au985UXh+ zbU8^GN2v7z@*?{3DSVY|35j;xM2m?(WD7EK2k93SinUr6$`prs64SgE8UtL=qadx2 zajhnt{}yXSc7gkz`Nkx)zaj5N=A z>ISt-6htH>8~Q$kU*ZAEkF4CGy}Qx6E5YO6^h4s&;;EJ~Hj5|nL94LP$S6yoMO};!OUhd*Vb~!eSX;qmm+9gDmti@$Sd=YR0Se2dTFdsYq%WY+f`6q$!DFq667I5e@$ z!S_M4Ey%t^Bu}E5?w}iFK4wF;n_yRBT}8owhqmUUCv~7-0VpDtUkZGXY%s~(G|}P` z%`OXeO7lH{{7F7)IYuV&pUMor0A-1%tt?|x4Qv1(<3udw6{WOD=;5L?@j9i^va9sQ zk2HyQMyyoH#FMCX7^PH%`eNy~3e7ZXQ;TH($rxS)WJ=uLO^F_`Cz?*Yf6qc_KL)P$ z)8ixP44I{#4Pi(AKZ{P4QSKW5%czyk6XLCv*uP|~J7_40 z&!}WZOBSolMJ1N*3VmV$ICqFP{O=V>84`W??=O!3|KFE*_+{o9%`1wR^dy3pJQ>DI zA`0Hn%R?L7p_hw#B-i}Em$c-+*0A~1|MP;(|CfJs@c;AQ|K-vD{ro=){Eq_vqrm?t J@c%0X{tuGhd@=w4 literal 0 HcmV?d00001 diff --git a/native_client/test/concurrent_streams.py b/native_client/test/concurrent_streams.py new file mode 100644 index 0000000000..51b9977447 --- /dev/null +++ b/native_client/test/concurrent_streams.py @@ -0,0 +1,78 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function + +import argparse +import numpy as np +import wave + +from deepspeech import Model + + +# These constants control the beam search decoder + +# Beam width used in the CTC decoder when building candidate transcriptions +BEAM_WIDTH = 500 + +# The alpha hyperparameter of the CTC decoder. Language Model weight +LM_ALPHA = 0.75 + +# The beta hyperparameter of the CTC decoder. Word insertion bonus. 
+LM_BETA = 1.85 + + +# These constants are tied to the shape of the graph used (changing them changes +# the geometry of the first layer), so make sure you use the same constants that +# were used during training + +# Number of MFCC features to use +N_FEATURES = 26 + +# Size of the context window used for producing timesteps in the input vector +N_CONTEXT = 9 + + +def main(): + parser = argparse.ArgumentParser(description='Running DeepSpeech inference.') + parser.add_argument('--model', required=True, + help='Path to the model (protocol buffer binary file)') + parser.add_argument('--alphabet', required=True, + help='Path to the configuration file specifying the alphabet used by the network') + parser.add_argument('--lm', nargs='?', + help='Path to the language model binary file') + parser.add_argument('--trie', nargs='?', + help='Path to the language model trie file created with native_client/generate_trie') + parser.add_argument('--audio1', required=True, + help='First audio file to use in interleaved streams') + parser.add_argument('--audio2', required=True, + help='Second audio file to use in interleaved streams') + args = parser.parse_args() + + ds = Model(args.model, N_FEATURES, N_CONTEXT, args.alphabet, BEAM_WIDTH) + + if args.lm and args.trie: + ds.enableDecoderWithLM(args.alphabet, args.lm, args.trie, LM_ALPHA, LM_BETA) + + with wave.open(args.audio1, 'rb') as fin: + fs1 = fin.getframerate() + audio1 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16) + + with wave.open(args.audio2, 'rb') as fin: + fs2 = fin.getframerate() + audio2 = np.frombuffer(fin.readframes(fin.getnframes()), np.int16) + + stream1 = ds.setupStream(sample_rate=fs1) + stream2 = ds.setupStream(sample_rate=fs2) + + splits1 = np.array_split(audio1, 10) + splits2 = np.array_split(audio2, 10) + + for part1, part2 in zip(splits1, splits2): + ds.feedAudioContent(stream1, part1) + ds.feedAudioContent(stream2, part2) + + print(ds.finishStream(stream1)) + print(ds.finishStream(stream2)) + +if __name__ == '__main__': + main() diff --git a/taskcluster/tc-python-tests-prod.sh b/taskcluster/tc-python-tests-prod.sh index 6803082eb0..b735a30eb0 100644 --- a/taskcluster/tc-python-tests-prod.sh +++ b/taskcluster/tc-python-tests-prod.sh @@ -39,4 +39,6 @@ LD_LIBRARY_PATH=${PY37_LDPATH}:$LD_LIBRARY_PATH pip install --verbose --only-bin run_prod_inference_tests +run_prod_concurrent_stream_tests + virtualenv_deactivate "${pyver}" "${PYENV_NAME}" diff --git a/taskcluster/tc-tests-utils.sh b/taskcluster/tc-tests-utils.sh index dc1e7f3cc9..5455d748f7 100755 --- a/taskcluster/tc-tests-utils.sh +++ b/taskcluster/tc-tests-utils.sh @@ -419,6 +419,26 @@ run_all_inference_tests() assert_correct_warning_upsampling "${phrase_pbmodel_withlm_mono_8k}" } +run_prod_concurrent_stream_tests() +{ + set +e + output=$(python ${TASKCLUSTER_TMP_DIR}/test_sources/concurrent_streams.py \ + --model ${TASKCLUSTER_TMP_DIR}/${model_name_mmap} \ + --alphabet ${TASKCLUSTER_TMP_DIR}/alphabet.txt \ + --lm ${TASKCLUSTER_TMP_DIR}/lm.binary \ + --trie ${TASKCLUSTER_TMP_DIR}/trie \ + --audio1 ${TASKCLUSTER_TMP_DIR}/LDC93S1.wav \ + --audio2 ${TASKCLUSTER_TMP_DIR}/new-home-in-the-stars-16k.wav 2>/dev/null) + status=$? 
+  set -e

+  output1=$(echo "${output}" | head -n 1)
+  output2=$(echo "${output}" | tail -n 1)
+
+  assert_correct_ldc93s1_prodmodel "${output1}" "${status}"
+  assert_correct_inference "${output2}" "i must find a new home in the stars" "${status}"
+}
+
 run_prod_inference_tests()
 {
   set +e
@@ -540,6 +560,7 @@ download_data()
   cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/alphabet.txt ${TASKCLUSTER_TMP_DIR}/alphabet.txt
   cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/vocab.pruned.lm ${TASKCLUSTER_TMP_DIR}/lm.binary
   cp ${DS_ROOT_TASK}/DeepSpeech/ds/data/smoke_test/vocab.trie ${TASKCLUSTER_TMP_DIR}/trie
+  cp -R ${DS_ROOT_TASK}/DeepSpeech/ds/native_client/test ${TASKCLUSTER_TMP_DIR}/test_sources
 }
 
 download_material()
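
Note on running the new test by hand: the concurrent-streams script takes the same flags the TaskCluster harness passes above, so it can be pointed at any trained graph and two WAV files. The invocation below is a sketch only; the script path and flag names come from the diff above, while the model and data paths are illustrative placeholders for wherever a trained graph and the smoke-test audio live locally. Per the harness assertion, the second stream is expected to decode to "i must find a new home in the stars".

    python native_client/test/concurrent_streams.py \
        --model models/output_graph.pbmm \
        --alphabet data/alphabet.txt \
        --lm data/smoke_test/vocab.pruned.lm \
        --trie data/smoke_test/vocab.trie \
        --audio1 data/smoke_test/LDC93S1.wav \
        --audio2 data/smoke_test/new-home-in-the-stars-16k.wav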