From 6efbb13a660185357739da7be508a00f4d24cf90 Mon Sep 17 00:00:00 2001 From: Shorthills AI <141953346+ShorthillsAI@users.noreply.github.com> Date: Tue, 17 Oct 2023 11:52:26 +0530 Subject: [PATCH 001/101] Noun error fixed in main.py (#9965) Signed-off-by: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> Co-authored-by: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> --- examples/multistep_workflow/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/multistep_workflow/main.py b/examples/multistep_workflow/main.py index 2848ee4b04aa4..dc3f837a0edf1 100644 --- a/examples/multistep_workflow/main.py +++ b/examples/multistep_workflow/main.py @@ -60,7 +60,7 @@ def _already_ran(entry_point_name, parameters, git_commit, experiment_id=None): # TODO(aaron): This is not great because it doesn't account for: # - changes in code -# - changes in dependant steps +# - changes in dependent steps def _get_or_run(entrypoint, parameters, git_commit, use_cache=True): existing_run = _already_ran(entrypoint, parameters, git_commit) if use_cache and existing_run: From 2d80353369d70574995e57d42084f9c87dab44da Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Mon, 16 Oct 2023 23:32:55 -0700 Subject: [PATCH 002/101] Improving prompt to new format (#9966) Signed-off-by: Sunish Sheth --- mlflow/metrics/base.py | 25 ++-- mlflow/metrics/genai/genai_metric.py | 13 +- mlflow/metrics/genai/prompts/v1.py | 27 ++-- tests/metrics/genai/prompts/test_v1.py | 131 ++++++++++++------- tests/metrics/genai/test_genai_metrics.py | 152 ++++++++++++---------- tests/metrics/test_base.py | 35 +++-- 6 files changed, 232 insertions(+), 151 deletions(-) diff --git a/mlflow/metrics/base.py b/mlflow/metrics/base.py index fd1293f7c327e..538cd79e11f2f 100644 --- a/mlflow/metrics/base.py +++ b/mlflow/metrics/base.py @@ -75,29 +75,30 @@ class EvaluationExample: input: str output: str score: float - justification: str = None + justification: str grading_context: Dict[str, str] = None + def _format_grading_context(self): + return "\n".join( + [f"key: {key}\nvalue:\n{value}" for key, value in self.grading_context.items()] + ) + def __str__(self) -> str: grading_context = ( "" if self.grading_context is None - else "\n".join( - [f"Provided {key}: {value}" for key, value in self.grading_context.items()] - ) + else "Additional information used by the model:\n" f"{self._format_grading_context()}" ) - justification = "" - if self.justification is not None: - justification = f"Justification: {self.justification}\n" - return f""" -Input: {self.input} +Input: +{self.input} -Provided output: {self.output} +Output: +{self.output} {grading_context} -Score: {self.score} -{justification} +score: {self.score} +justification: {self.justification} """ diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index 151ccc7a530cb..d525e06c005b0 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -33,7 +33,12 @@ def _format_args_string(grading_context_columns: Optional[List[str]], eval_value return ( "" if args_dict is None - else "\n".join(f"Provided {arg}: {arg_value}" for arg, arg_value in args_dict.items()) + else ( + "Additional information used by the model:\n" + + "\n".join( + [f"key: {arg}\nvalue:\n{arg_value}" for arg, arg_value in args_dict.items()] + ) + ) ) @@ -51,11 +56,11 @@ def _extract_score_and_justification(output): # Attempt to parse JSON try: data = json.loads(text) - score = 
int(data.get("Score")) - justification = data.get("Justification") + score = int(data.get("score")) + justification = data.get("justification") except json.JSONDecodeError: # If parsing fails, use regex - match = re.search(r"Score: (\d+),?\s*Justification: (.+)", text) + match = re.search(r"score: (\d+),?\s*justification: (.+)", text) if match: score = int(match.group(1)) justification = match.group(2) diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 2fbe6d90ec18f..487102034980d 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -17,17 +17,22 @@ } grading_system_prompt_template = PromptTemplate( """ -Please act as an impartial judge and evaluate the quality of the provided output which -attempts to produce output for the provided input based on a provided information. +Task: +You are an impartial judge. You will be given an input that was sent to a machine +learning model, and you will be given an output that the model produced. You +may also be given additional information that was used by the model to generate the output. -You'll be given a grading format below which you'll call for each provided information, -input and provided output to submit your justification and score to compute the {name} of -the output. +Your task is to determine a numerical score called {name} based on the input and output. +A definition of {name} and a grading rubric are provided below. +You must use the grading rubric to determine your score. You must also justify your score. + +Examples could be included below for reference. Make sure to use them as references and to +understand them before completing the task. Input: {input} -Provided output: +Output: {output} {grading_context_columns} @@ -35,15 +40,14 @@ Metric definition: {definition} -Below is your grading criteria: +Grading rubric: {grading_prompt} {examples} -And you'll need to submit your grading for the {name} of the output, -using the following in json format: -Score: [your score number for the {name} of the output] -Justification: [your step by step reasoning about the {name} of the output] +You must return the following fields in your response one below the other: +score: Your numerical score for the model's {name} based on the rubric +justification: Your step-by-step reasoning about the model's {name} score """ ) @@ -67,6 +71,7 @@ def to_dict(self): if self.examples is None or len(self.examples) == 0 else f"Examples:\n{self._format_examples()}" ) + return { "model": self.model, "eval_prompt": grading_system_prompt_template.partial_fill( diff --git a/tests/metrics/genai/prompts/test_v1.py b/tests/metrics/genai/prompts/test_v1.py index 57af1c669e92a..4605688e3fd9e 100644 --- a/tests/metrics/genai/prompts/test_v1.py +++ b/tests/metrics/genai/prompts/test_v1.py @@ -48,23 +48,32 @@ def test_evaluation_model_output(): assert model1["parameters"] == {"temperature": 1.0} grading_context = {"ground_truth": "This is an output"} - args_string = "\n".join( - [f"Provided {arg}: {arg_value}" for arg, arg_value in grading_context.items()] + args_string = "Additional information used by the model:\n" + "\n".join( + [f"key: {arg}\nvalue:\n{arg_value}" for arg, arg_value in grading_context.items()] ) expected_prompt1 = """ - Please act as an impartial judge and evaluate the quality of the provided output which - attempts to produce output for the provided input based on a provided information. 
- You'll be given a grading format below which you'll call for each provided information, - input and provided output to submit your justification and score to compute the correctness of - the output. + Task: + You are an impartial judge. You will be given an input that was sent to a machine + learning model, and you will be given an output that the model produced. You + may also be given additional information that was used by the model to generate the output. + + Your task is to determine a numerical score called correctness based on the input and output. + A definition of correctness and a grading rubric are provided below. + You must use the grading rubric to determine your score. You must also justify your score. + + Examples could be included below for reference. Make sure to use them as references and to + understand them before completing the task. Input: This is an input - Provided output: + Output: This is an output - Provided ground_truth: This is an output + Additional information used by the model: + key: ground_truth + value: + This is an output Metric definition: Correctness refers to how well the generated output matches or aligns with the reference or @@ -72,7 +81,7 @@ def test_evaluation_model_output(): truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity. - Below is your grading criteria: + Grading rubric: Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 1: the answer is completely incorrect, doesn’t mention anything about the @@ -84,22 +93,38 @@ def test_evaluation_model_output(): - Score 5: the answer correctly answer the question and not missing any major aspect Examples: - Input: This is an input - Provided output: This is an output - Provided ground_truth: This is an output - Score: 4 - Justification: This is a justification - - Input: This is an example input 2 - Provided output: This is an example output 2 - Provided ground_truth: This is an output - Score: 4 - Justification: This is an example justification 2 - - And you'll need to submit your grading for the correctness of the output, - using the following in json format: - Score: [your score number for the correctness of the output] - Justification: [your step by step reasoning about the correctness of the output] + Input: + This is an input + + Output: + This is an output + + Additional information used by the model: + key: ground_truth + value: + This is an output + + score: 4 + justification: This is a justification + + + Input: + This is an example input 2 + + Output: + This is an example output 2 + + Additional information used by the model: + key: ground_truth + value: + This is an output + + score: 4 + justification: This is an example justification 2 + + You must return the following fields in your response one below the other: + score: Your numerical score for the model's correctness based on the rubric + justification: Your step-by-step reasoning about the model's correctness score """ prompt1 = model1["eval_prompt"].format( input="This is an input", output="This is an output", grading_context_columns=args_string @@ -133,16 +158,22 @@ def test_evaluation_model_output(): } args_string = "" expected_prompt2 = """ - Please act as an impartial judge and evaluate the quality of the provided output which - attempts to produce output for the provided input based on a provided information. 
- You'll be given a grading format below which you'll call for each provided information, - input and provided output to submit your justification and score to compute the correctness of - the output. + Task: + You are an impartial judge. You will be given an input that was sent to a machine + learning model, and you will be given an output that the model produced. You + may also be given additional information that was used by the model to generate the output. + + Your task is to determine a numerical score called correctness based on the input and output. + A definition of correctness and a grading rubric are provided below. + You must use the grading rubric to determine your score. You must also justify your score. + + Examples could be included below for reference. Make sure to use them as references and to + understand them before completing the task. Input: This is an input - Provided output: + Output: This is an output Metric definition: @@ -151,7 +182,7 @@ def test_evaluation_model_output(): truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity. - Below is your grading criteria: + Grading rubric: Correctness: If the answer correctly answer the question, below are the details for different scores: - Score 1: the answer is completely incorrect, doesn’t mention anything about the question @@ -162,10 +193,9 @@ def test_evaluation_model_output(): critical aspect. - Score 5: the answer correctly answer the question and not missing any major aspect - And you'll need to submit your grading for the correctness of the output, - using the following in json format: - Score: [your score number for the correctness of the output] - Justification: [your step by step reasoning about the correctness of the output] + You must return the following fields in your response one below the other: + score: Your numerical score for the model's correctness based on the rubric + justification: Your step-by-step reasoning about the model's correctness score """ prompt2 = model2["eval_prompt"].format( input="This is an input", output="This is an output", grading_context_columns=args_string @@ -184,28 +214,33 @@ def test_no_examples(examples): args_string = "" expected_prompt2 = """ - Please act as an impartial judge and evaluate the quality of the provided output which - attempts to produce output for the provided input based on a provided information. - You'll be given a grading format below which you'll call for each provided information, - input and provided output to submit your justification and score to compute the correctness of - the output. + Task: + You are an impartial judge. You will be given an input that was sent to a machine + learning model, and you will be given an output that the model produced. You + may also be given additional information that was used by the model to generate the output. + + Your task is to determine a numerical score called correctness based on the input and output. + A definition of correctness and a grading rubric are provided below. + You must use the grading rubric to determine your score. You must also justify your score. + + Examples could be included below for reference. Make sure to use them as references and to + understand them before completing the task. 
Input: This is an input - Provided output: + Output: This is an output Metric definition: definition - Below is your grading criteria: + Grading rubric: grading prompt - And you'll need to submit your grading for the correctness of the output, - using the following in json format: - Score: [your score number for the correctness of the output] - Justification: [your step by step reasoning about the correctness of the output] + You must return the following fields in your response one below the other: + score: Your numerical score for the model's correctness based on the rubric + justification: Your step-by-step reasoning about the model's correctness score """ prompt2 = model["eval_prompt"].format( input="This is an input", output="This is an output", grading_context_columns=args_string diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index ec7b406f2a857..4d5db0ef08d76 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -37,7 +37,7 @@ properly_formatted_openai_response1 = { "candidates": [ { - "text": '{\n "Score": 3,\n "Justification": "' f"{openai_justification1}" '"\n}', + "text": '{\n "score": 3,\n "justification": "' f"{openai_justification1}" '"\n}', "metadata": {"finish_reason": "stop"}, } ], @@ -53,7 +53,7 @@ properly_formatted_openai_response2 = { "candidates": [ { - "text": '{\n "Score": 2,\n "Justification": "The provided output gives a correct ' + "text": '{\n "score": 2,\n "justification": "The provided output gives a correct ' "and adequate explanation of what Apache Spark is, covering its main functions and " "components like Spark SQL, Spark Streaming, and MLlib. However, it misses a " "critical aspect, which is Spark's development as a response to the limitations " @@ -78,7 +78,7 @@ incorrectly_formatted_openai_response = { "candidates": [ { - "text": "Score: 2\nJustification: \n\nThe provided output gives some relevant " + "text": "score: 2\njustification: \n\nThe provided output gives some relevant " "information about MLflow including its capabilities such as experiment tracking, " "model packaging, versioning, and deployment. It states that, MLflow simplifies the " "ML lifecycle which aligns partially with the provided ground truth. However, it " @@ -242,20 +242,24 @@ def test_make_genai_metric_correct_response(): assert mock_predict_function.call_count == 1 assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo" assert mock_predict_function.call_args[0][1] == { - "prompt": "\nPlease act as an impartial judge and evaluate the quality of " - "the provided output which\nattempts to produce output for the provided input " - "based on a provided information.\n\nYou'll be given a grading format below which " - "you'll call for each provided information,\ninput and provided output to submit " - "your justification and score to compute the fake_metric of\nthe output." 
- "\n\nInput:\ninput\n\nProvided output:\nprediction\n\nProvided targets: " - "ground_truth\n\nMetric definition:\nFake metric definition\n\nBelow is your grading " - "criteria:\nFake metric grading prompt\n\nExamples:\n\nInput: example-input\n\n" - "Provided output: example-output\n\nProvided targets: example-ground_truth\n\n" - "Score: 4\nJustification: example-justification\n\n \n\nAnd you'll need to " - "submit your grading for the fake_metric of the output,\nusing the following in json " - "format:\nScore: [your score number for the fake_metric of the " - "output]\nJustification: [your step by step reasoning about the fake_metric of the " - "output]\n ", + "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "fake_metric based on the input and output.\nA definition of " + "fake_metric and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" + "\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n" + "key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n" + "Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nInput:\nexample-input\n\n" + "Output:\nexample-output\n\nAdditional information used by the model:\nkey: targets\n" + "value:\nexample-ground_truth\n\nscore: 4\njustification: " + "example-justification\n \n\nYou must return the following fields in your " + "response one below the other:\nscore: Your numerical score for the model's " + "fake_metric based on the rubric\njustification: Your step-by-step reasoning about " + "the model's fake_metric score\n ", "temperature": 0.0, "max_tokens": 200, "top_p": 1.0, @@ -432,7 +436,9 @@ def test_make_genai_metric_failure(): def test_format_args_string(): variable_string = _format_args_string(["foo", "bar"], {"foo": ["foo"], "bar": ["bar"]}, 0) - assert variable_string == "Provided foo: foo\nProvided bar: bar" + assert variable_string == ( + "Additional information used by the model:\nkey: foo\nvalue:\nfoo" "\nkey: bar\nvalue:\nbar" + ) with pytest.raises( MlflowException, @@ -446,7 +452,7 @@ def test_extract_score_and_justification(): output={ "candidates": [ { - "text": '{"Score": 4, "Justification": "This is a justification"}', + "text": '{"score": 4, "justification": "This is a justification"}', } ] } @@ -459,7 +465,7 @@ def test_extract_score_and_justification(): output={ "candidates": [ { - "text": "Score: 2 \nJustification: This is a justification", + "text": "score: 2 \njustification: This is a justification", } ] } @@ -482,7 +488,7 @@ def test_extract_score_and_justification(): output={ "candidates": [ { - "text": '{"Score": "4", "Justification": "This is a justification"}', + "text": '{"score": "4", "justification": "This is a justification"}', } ] } @@ -495,7 +501,7 @@ def test_extract_score_and_justification(): output={ "candidates": [ { - "text": '{"Score": 4, "Justification": {"foo": "bar"}}', + "text": '{"score": 4, "justification": {"foo": "bar"}}', } ] } @@ -524,27 +530,32 @@ def test_correctness_metric(): assert 
mock_predict_function.call_count == 1 assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo" assert mock_predict_function.call_args[0][1] == { - "prompt": "\nPlease act as an impartial judge and evaluate the quality of " - "the provided output which\nattempts to produce output for the provided input " - "based on a provided information.\n\nYou'll be given a grading format below which " - "you'll call for each provided information,\ninput and provided output to submit " - "your justification and score to compute the correctness of\nthe output.\n" + "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "correctness based on the input and output.\nA definition of " + "correctness and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" f"\nInput:\n{input}\n" - f"\nProvided output:\n{mlflow_prediction}\n" - f"\nProvided targets: {mlflow_ground_truth}\n" + f"\nOutput:\n{mlflow_prediction}\n" + "\nAdditional information used by the model:\nkey: targets\nvalue:\n" + f"{mlflow_ground_truth}\n" f"\nMetric definition:\n{CorrectnessMetric.definition}\n" - f"\nBelow is your grading criteria:\n{CorrectnessMetric.grading_prompt}\n" + f"\nGrading rubric:\n{CorrectnessMetric.grading_prompt}\n" "\nExamples:\n" - f"\nInput: {mlflow_example.input}\n" - f"\nProvided output: {mlflow_example.output}\n" - f"\nProvided targets: {mlflow_ground_truth}\n" - f"\nScore: {mlflow_example.score}\n" - f"Justification: {mlflow_example.justification}\n\n \n\n" - "And you'll need to submit your grading for the correctness of the output," - "\nusing the following in json format:\n" - "Score: [your score number for the correctness of the output]\n" - "Justification: [your step by step reasoning about the correctness of the output]" - "\n ", + f"\nInput:\n{mlflow_example.input}\n" + f"\nOutput:\n{mlflow_example.output}\n" + "\nAdditional information used by the model:\nkey: targets\nvalue:\n" + f"{mlflow_ground_truth}\n" + f"\nscore: {mlflow_example.score}\n" + f"justification: {mlflow_example.justification}\n \n" + "\nYou must return the following fields in your response one below the other:\nscore: " + "Your numerical score for the model's correctness based on the " + "rubric\njustification: Your step-by-step reasoning about the model's " + "correctness score\n ", **CorrectnessMetric.parameters, } @@ -585,22 +596,26 @@ def test_relevance_metric(): assert mock_predict_function.call_count == 1 assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo" assert mock_predict_function.call_args[0][1] == { - "prompt": "\nPlease act as an impartial judge and evaluate the quality of " - "the provided output which\nattempts to produce output for the provided input " - "based on a provided information.\n\nYou'll be given a grading format below which " - "you'll call for each provided information,\ninput and provided output to submit " - "your justification and score to compute the relevance of\nthe output.\n" + "prompt": "\nTask:\nYou are an impartial judge. 
You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "relevance based on the input and output.\nA definition of " + "relevance and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" f"\nInput:\n{input}\n" - f"\nProvided output:\n{mlflow_prediction}\n" - f"\nProvided context: {mlflow_ground_truth}\n" + f"\nOutput:\n{mlflow_prediction}\n" + "\nAdditional information used by the model:\nkey: context\nvalue:\n" + f"{mlflow_ground_truth}\n" f"\nMetric definition:\n{RelevanceMetric.definition}\n" - f"\nBelow is your grading criteria:\n{RelevanceMetric.grading_prompt}\n" + f"\nGrading rubric:\n{RelevanceMetric.grading_prompt}\n" "\n\n" - "\nAnd you'll need to submit your grading for the relevance of the output," - "\nusing the following in json format:\n" - "Score: [your score number for the relevance of the output]\n" - "Justification: [your step by step reasoning about the relevance of the output]" - "\n ", + "\nYou must return the following fields in your response one below the other:\nscore: " + "Your numerical score for the model's relevance based on the " + "rubric\njustification: Your step-by-step reasoning about the model's " + "relevance score\n ", **RelevanceMetric.parameters, } @@ -642,24 +657,27 @@ def test_strict_correctness_metric(): assert mock_predict_function.call_count == 1 assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo-16k" assert mock_predict_function.call_args[0][1] == { - "prompt": "\nPlease act as an impartial judge and evaluate the quality of " - "the provided output which\nattempts to produce output for the provided input " - "based on a provided information.\n\nYou'll be given a grading format below which " - "you'll call for each provided information,\ninput and provided output to submit " - "your justification and score to compute the strict_correctness of\nthe output.\n" + "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "strict_correctness based on the input and output.\nA definition of " + "strict_correctness and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. 
Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" f"\nInput:\n{input}\n" - f"\nProvided output:\n{mlflow_prediction}\n" - f"\nProvided targets: {mlflow_ground_truth}\n" + f"\nOutput:\n{mlflow_prediction}\n" + "\nAdditional information used by the model:\nkey: targets\nvalue:\n" + f"{mlflow_ground_truth}\n" f"\nMetric definition:\n{StrictCorrectnessMetric.definition}\n" - f"\nBelow is your grading criteria:\n{StrictCorrectnessMetric.grading_prompt}\n" + f"\nGrading rubric:\n{StrictCorrectnessMetric.grading_prompt}\n" "\nExamples:\n" f"{examples}\n" - "\nAnd you'll need to submit your grading for the strict_correctness of the output," - "\nusing the following in json format:\n" - "Score: [your score number for the strict_correctness of the output]\n" - "Justification: [your step by step reasoning about the strict_correctness of the " - "output]" - "\n ", + "\nYou must return the following fields in your response one below the other:\nscore: " + "Your numerical score for the model's strict_correctness based on the " + "rubric\njustification: Your step-by-step reasoning about the model's " + "strict_correctness score\n ", **StrictCorrectnessMetric.parameters, } diff --git a/tests/metrics/test_base.py b/tests/metrics/test_base.py index 19065fbf31a6d..f0aa54bd6243e 100644 --- a/tests/metrics/test_base.py +++ b/tests/metrics/test_base.py @@ -14,18 +14,35 @@ def test_evaluation_example_str(): ) ) example1_expected = """ - Input: This is an input - Provided output: This is an output - Provided foo: bar - Score: 5 - Justification: This is a justification + Input: + This is an input + + Output: + This is an output + + Additional information used by the model: + key: foo + value: + bar + + score: 5 + justification: This is a justification """ assert re.sub(r"\s+", "", example1_expected) == re.sub(r"\s+", "", example1) - example2 = str(EvaluationExample(input="This is an input", output="This is an output", score=5)) + example2 = str( + EvaluationExample( + input="This is an input", output="This is an output", score=5, justification="It works" + ) + ) example2_expected = """ - Input: This is an input - Provided output: This is an output - Score: 5 + Input: + This is an input + + Output: + This is an output + + score: 5 + justification: It works """ assert re.sub(r"\s+", "", example2_expected) == re.sub(r"\s+", "", example2) From 760718a46484a55daf48c6c8dafe8571a65a6784 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 17 Oct 2023 16:00:59 +0900 Subject: [PATCH 003/101] Insert devtools section in PR description (#9963) Signed-off-by: harupy --- .github/workflows/advice.js | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/.github/workflows/advice.js b/.github/workflows/advice.js index 1d418bed1482d..9886d5dfa6030 100644 --- a/.github/workflows/advice.js +++ b/.github/workflows/advice.js @@ -29,13 +29,35 @@ module.exports = async ({ context, github }) => { const { user, body } = context.payload.pull_request; const messages = []; - const codespacesBadge = `[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/${user.login}/mlflow/pull/${issue_number}?quickstart=1)`; - if (body && !body.includes(codespacesBadge)) { + const title = "🛠 DevTools 🛠"; + if (body && !body.includes(title)) { + const codespacesBadge = `[![Open in GitHub Codespaces](https://github.com/codespaces/badge.svg)](https://codespaces.new/${user.login}/mlflow/pull/${issue_number}?quickstart=1)`; + 
const newSection = `
+<details>
+<summary>${title}</summary>
+
+${codespacesBadge}
+
+#### Install mlflow from this PR
+
+\`\`\`
+pip install git+https://github.com/mlflow/mlflow.git@refs/pull/${issue_number}/merge
+\`\`\`
+
+#### Checkout with GitHub CLI
+
+\`\`\`
+gh pr checkout ${issue_number}
+\`\`\`
+
+</details>
+`.trim(); await github.rest.pulls.update({ owner, repo, pull_number: issue_number, - body: `${codespacesBadge}\n\n${body}`, + body: `${newSection}\n\n${body}`, }); } From 5f3a391900b3f8ff5cf243b3844f06e44d8d1dc8 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 17 Oct 2023 16:11:36 +0900 Subject: [PATCH 004/101] Call `hashlib.md5` with `usedforsecurity=False` for FIPS (#9961) Signed-off-by: harupy --- examples/databricks/multipart.py | 2 +- mlflow/data/digest_utils.py | 4 ++-- mlflow/models/evaluation/base.py | 5 ++--- mlflow/store/tracking/file_store.py | 7 +++---- mlflow/utils/__init__.py | 20 ++++++++++++++++++++ tests/evaluate/test_evaluation.py | 4 ++-- tests/resources/data/dataset.py | 4 ++-- tests/store/tracking/test_file_store.py | 4 ++-- 8 files changed, 34 insertions(+), 16 deletions(-) diff --git a/examples/databricks/multipart.py b/examples/databricks/multipart.py index bbf0667913ea1..41a116f7e1688 100644 --- a/examples/databricks/multipart.py +++ b/examples/databricks/multipart.py @@ -43,7 +43,7 @@ def show_system_info(): def md5_checksum(path): - file_hash = hashlib.md5() + file_hash = hashlib.sha256() with open(path, "rb") as f: while chunk := f.read(1024**2): file_hash.update(chunk) diff --git a/mlflow/data/digest_utils.py b/mlflow/data/digest_utils.py index 692a8ad199a5b..8f9fdf2fbfcab 100644 --- a/mlflow/data/digest_utils.py +++ b/mlflow/data/digest_utils.py @@ -1,10 +1,10 @@ -import hashlib from typing import Any, List from packaging.version import Version from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.utils import _insecure_md5 MAX_ROWS = 10000 @@ -159,7 +159,7 @@ def get_normalized_md5_digest(elements: List[Any]) -> str: INVALID_PARAMETER_VALUE, ) - md5 = hashlib.md5() + md5 = _insecure_md5() for element in elements: md5.update(element) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 47c81b0e17940..365db6618bb98 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1,4 +1,3 @@ -import hashlib import json import logging import math @@ -31,7 +30,7 @@ from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.tracking.client import MlflowClient -from mlflow.utils import _get_fully_qualified_class_name +from mlflow.utils import _get_fully_qualified_class_name, _insecure_md5 from mlflow.utils.annotations import developer_stable, experimental from mlflow.utils.class_utils import _get_class_from_string from mlflow.utils.file_utils import TempDir @@ -597,7 +596,7 @@ def __init__( ) # generate dataset hash - md5_gen = hashlib.md5() + md5_gen = _insecure_md5() _gen_md5_for_arraylike_obj(md5_gen, self._features_data) if self._labels_data is not None: _gen_md5_for_arraylike_obj(md5_gen, self._labels_data) diff --git a/mlflow/store/tracking/file_store.py b/mlflow/store/tracking/file_store.py index 2b1019aa7dbf1..afcfecc2d8423 100644 --- a/mlflow/store/tracking/file_store.py +++ b/mlflow/store/tracking/file_store.py @@ -1,4 +1,3 @@ -import hashlib import json import logging import os @@ -46,7 +45,7 @@ SEARCH_MAX_RESULTS_THRESHOLD, ) from mlflow.store.tracking.abstract_store import AbstractStore -from mlflow.utils import get_results_from_paginated_fn +from mlflow.utils import _insecure_md5, get_results_from_paginated_fn from mlflow.utils.file_utils import ( append_to, exists, @@ -1128,13 +1127,13 @@ def log_inputs(self, run_id: str, 
datasets: Optional[List[DatasetInput]] = None) @staticmethod def _get_dataset_id(dataset_name: str, dataset_digest: str) -> str: - md5 = hashlib.md5(dataset_name.encode("utf-8")) + md5 = _insecure_md5(dataset_name.encode("utf-8")) md5.update(dataset_digest.encode("utf-8")) return md5.hexdigest() @staticmethod def _get_input_id(dataset_id: str, run_id: str) -> str: - md5 = hashlib.md5(dataset_id.encode("utf-8")) + md5 = _insecure_md5(dataset_id.encode("utf-8")) md5.update(run_id.encode("utf-8")) return md5.hexdigest() diff --git a/mlflow/utils/__init__.py b/mlflow/utils/__init__.py index fd2563cefe3be..a81e70bd29b5f 100644 --- a/mlflow/utils/__init__.py +++ b/mlflow/utils/__init__.py @@ -1,8 +1,10 @@ import base64 +import hashlib import inspect import logging import socket import subprocess +import sys import uuid from contextlib import closing from itertools import islice @@ -268,3 +270,21 @@ def get_results_from_paginated_fn(paginated_fn, max_results_per_page, max_result else: break return all_results + + +def _insecure_md5(string=b""): + """ + Do not use this function for security purposes (e.g., password hashing). + + In Python >= 3.9, `hashlib.md5` fails in FIPS-compliant environments. This function + provides a workaround for this issue by using `hashlib.md5` with `usedforsecurity=False`. + + References: + - https://github.com/mlflow/mlflow/issues/9905 + - https://docs.python.org/3/library/hashlib.html + """ + return ( + hashlib.md5(string, usedforsecurity=False) + if sys.version_info >= (3, 9) + else hashlib.md5(string) + ) diff --git a/tests/evaluate/test_evaluation.py b/tests/evaluate/test_evaluation.py index b9203dd5d6f2d..88fca6e18c16d 100644 --- a/tests/evaluate/test_evaluation.py +++ b/tests/evaluate/test_evaluation.py @@ -1,4 +1,3 @@ -import hashlib import io import json import os @@ -55,6 +54,7 @@ from mlflow.pyfunc import _ServedPyFuncModel from mlflow.pyfunc.scoring_server.client import ScoringServerClient from mlflow.tracking.artifact_utils import get_artifact_uri +from mlflow.utils import _insecure_md5 from mlflow.utils.file_utils import TempDir @@ -710,7 +710,7 @@ def test_dataset_metadata(): def test_gen_md5_for_arraylike_obj(): def get_md5(data): - md5_gen = hashlib.md5() + md5_gen = _insecure_md5() _gen_md5_for_arraylike_obj(md5_gen, data) return md5_gen.hexdigest() diff --git a/tests/resources/data/dataset.py b/tests/resources/data/dataset.py index 50b9b31d61c09..b09e2d3d5df3a 100644 --- a/tests/resources/data/dataset.py +++ b/tests/resources/data/dataset.py @@ -1,5 +1,4 @@ import base64 -import hashlib import json from typing import Any, Dict, List, Optional @@ -9,6 +8,7 @@ from mlflow.data.dataset import Dataset from mlflow.types import Schema from mlflow.types.utils import _infer_schema +from mlflow.utils import _insecure_md5 from tests.resources.data.dataset_source import TestDatasetSource @@ -29,7 +29,7 @@ def _compute_digest(self) -> str: Computes a digest for the dataset. Called if the user doesn't supply a digest when constructing the dataset. 
""" - hash_md5 = hashlib.md5() + hash_md5 = _insecure_md5() for hash_part in pd.util.hash_array(np.array(self._data_list)): hash_md5.update(hash_part) return base64.b64encode(hash_md5.digest()).decode("ascii") diff --git a/tests/store/tracking/test_file_store.py b/tests/store/tracking/test_file_store.py index 2f205dcf6ddd0..52655419c930d 100644 --- a/tests/store/tracking/test_file_store.py +++ b/tests/store/tracking/test_file_store.py @@ -1,4 +1,3 @@ -import hashlib import json import os import posixpath @@ -38,6 +37,7 @@ from mlflow.store.entities.paged_list import PagedList from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT from mlflow.store.tracking.file_store import FileStore +from mlflow.utils import _insecure_md5 from mlflow.utils.file_utils import TempDir, path_to_local_file_uri, read_yaml, write_yaml from mlflow.utils.mlflow_tags import MLFLOW_DATASET_CONTEXT, MLFLOW_LOGGED_MODELS, MLFLOW_RUN_NAME from mlflow.utils.name_utils import _EXPERIMENT_ID_FIXED_WIDTH, _GENERATOR_PREDICATES @@ -2493,7 +2493,7 @@ def assert_expected_input_storage_ids_present(run, dataset_storage_ids): inputs_dir = os.path.join(run_dir, FileStore.INPUTS_FOLDER_NAME) expected_input_storage_ids = [] for dataset_storage_id in dataset_storage_ids: - md5 = hashlib.md5(dataset_storage_id.encode("utf-8")) + md5 = _insecure_md5(dataset_storage_id.encode("utf-8")) md5.update(run.info.run_id.encode("utf-8")) expected_input_storage_ids.append(md5.hexdigest()) assert set(os.listdir(inputs_dir)) == set(expected_input_storage_ids) From b431cd1f35520b638ab707a3851ae5da780427e2 Mon Sep 17 00:00:00 2001 From: lightnessofbein <39873967+lightnessofbein@users.noreply.github.com> Date: Tue, 17 Oct 2023 12:57:58 +0200 Subject: [PATCH 005/101] Enabled Ruff W (#9970) Signed-off-by: Serhii Fedash Signed-off-by: mlflow-automation Co-authored-by: mlflow-automation --- examples/flower_classifier/image_pyfunc.py | 2 +- examples/sentence_transformers/simple.py | 2 +- mlflow/metrics/__init__.py | 30 +++++++++---------- mlflow/models/docker_utils.py | 4 +-- mlflow/sagemaker/__init__.py | 4 +-- mlflow/utils/docstring_utils.py | 10 +++---- pyproject.toml | 1 + tests/pyfunc/test_scoring_server.py | 6 ++-- tests/sklearn/test_sklearn_model_export.py | 2 +- tests/store/tracking/test_sqlalchemy_store.py | 30 +++++++++---------- 10 files changed, 46 insertions(+), 45 deletions(-) diff --git a/examples/flower_classifier/image_pyfunc.py b/examples/flower_classifier/image_pyfunc.py index bb8b43dbd3c27..ba77072f78078 100644 --- a/examples/flower_classifier/image_pyfunc.py +++ b/examples/flower_classifier/image_pyfunc.py @@ -173,7 +173,7 @@ def _load_pyfunc(path): - conda-forge dependencies: - python=={python_version} - - pip=={pip_version} + - pip=={pip_version} - pip: - mlflow>=1.6 - pillow=={pillow_version} diff --git a/examples/sentence_transformers/simple.py b/examples/sentence_transformers/simple.py index 3993cba2f0628..f303362f83bbc 100644 --- a/examples/sentence_transformers/simple.py +++ b/examples/sentence_transformers/simple.py @@ -38,5 +38,5 @@ 2.37922110e-02 -2.28897743e-02 3.89375277e-02 3.02067865e-02] [ 4.81191138e-03 -9.33756605e-02 6.95968643e-02 8.09735525e-03 ... - 6.57437667e-02 -2.72239652e-02 4.02687863e-02 -1.05599344e-01]] + 6.57437667e-02 -2.72239652e-02 4.02687863e-02 -1.05599344e-01]] """ diff --git a/mlflow/metrics/__init__.py b/mlflow/metrics/__init__.py index 12a167f6f0a3c..1cfdd80336d03 100644 --- a/mlflow/metrics/__init__.py +++ b/mlflow/metrics/__init__.py @@ -45,8 +45,8 @@ .. 
Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating latency. Latency is determined by the time it takes to generate a -prediction for a given input. Note that computing latency requires each row to be predicted -sequentially, which will likely slow down the evaluation process. +prediction for a given input. Note that computing latency requires each row to be predicted +sequentially, which will likely slow down the evaluation process. """ # general text metrics @@ -58,7 +58,7 @@ """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. -A metric for calculating token_count. Token count is calculated using tiktoken by using the +A metric for calculating token_count. Token count is calculated using tiktoken by using the `cl100k_base` tokenizer. """ @@ -72,11 +72,11 @@ """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. -A metric for evaluating `toxicity`_ using the model `roberta-hate-speech-dynabench-r4`_, -which defines hate as "abusive speech targeting specific group characteristics, such as +A metric for evaluating `toxicity`_ using the model `roberta-hate-speech-dynabench-r4`_, +which defines hate as "abusive speech targeting specific group characteristics, such as ethnic origin, religion, gender, or sexual orientation." -The score ranges from 0 to 1, where scores closer to 1 are more toxic. The default threshold +The score ranges from 0 to 1, where scores closer to 1 are more toxic. The default threshold for a text to be considered "toxic" is 0.5. Aggregations calculated for this metric: @@ -98,7 +98,7 @@ A metric for evaluating `perplexity`_ using the model gpt2. -The score ranges from 0 to infinity, where a lower score means that the model is better at +The score ranges from 0 to infinity, where a lower score means that the model is better at predicting the given text and a higher score means that the model is not likely to predict the text. Aggregations calculated for this metric: @@ -117,7 +117,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating `flesch kincaid grade level`_ using `textstat`_. - + This metric outputs a number that approximates the grade level needed to comprehend the text, which will likely range from around 0 to 15 (although it is not limited to this range). @@ -140,7 +140,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating `automated readability index`_ using `textstat`_. - + This metric outputs a number that approximates the grade level needed to comprehend the text, which will likely range from around 0 to 15 (although it is not limited to this range). @@ -178,7 +178,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rouge1`_. - + The score ranges from 0 to 1, where a higher score indicates higher similarity. `rouge1`_ uses unigram based scoring to calculate similarity. @@ -198,7 +198,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rouge2`_. - + The score ranges from 0 to 1, where a higher score indicates higher similarity. `rouge2`_ uses bigram based scoring to calculate similarity. @@ -218,7 +218,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. 
A metric for evaluating `rougeL`_. - + The score ranges from 0 to 1, where a higher score indicates higher similarity. `rougeL`_ uses unigram based scoring to calculate similarity. @@ -238,7 +238,7 @@ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rougeLsum`_. - + The score ranges from 0 to 1, where a higher score indicates higher similarity. `rougeLsum`_ uses longest common subsequence based scoring to calculate similarity. @@ -298,7 +298,7 @@ A metric for evaluating `r2_score`_. This metric computes an aggregate score for the coefficient of determination. R2 ranges from -negative infinity to 1, and measures the percentage of variance explained by the predictor +negative infinity to 1, and measures the percentage of variance explained by the predictor variables in a regression. .. _r2_score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html @@ -348,7 +348,7 @@ A metric for evaluating `precision`_ for classification. This metric computes an aggregate score between 0 and 1 for the precision of -classification task. +classification task. .. _precision: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html """ diff --git a/mlflow/models/docker_utils.py b/mlflow/models/docker_utils.py index 30931fde329a1..734f0eca0ec44 100644 --- a/mlflow/models/docker_utils.py +++ b/mlflow/models/docker_utils.py @@ -105,8 +105,8 @@ def _get_maven_proxy(): {custom_setup_steps} -# granting read/write access and conditional execution authority to all child directories -# and files to allow for deployment to AWS Sagemaker Serverless Endpoints +# granting read/write access and conditional execution authority to all child directories +# and files to allow for deployment to AWS Sagemaker Serverless Endpoints # (see https://docs.aws.amazon.com/sagemaker/latest/dg/serverless-endpoints.html) RUN chmod o+rwX /opt/mlflow/ diff --git a/mlflow/sagemaker/__init__.py b/mlflow/sagemaker/__init__.py index 6a9a8be15b0b5..b647dc2405c1d 100644 --- a/mlflow/sagemaker/__init__.py +++ b/mlflow/sagemaker/__init__.py @@ -2396,8 +2396,8 @@ def update_deployment( Defaults to ``None``. - ``variant_name``: A string specifying the desired name when creating a - production variant. Defaults to ``None``. - - ``async_inference_config``: A dictionary specifying the async config + production variant. Defaults to ``None``. + - ``async_inference_config``: A dictionary specifying the async config configuration. Defaults to ``None``. - ``env``: A dictionary specifying environment variables as key-value pairs to be set for the deployed model. Defaults to ``None``. diff --git a/mlflow/utils/docstring_utils.py b/mlflow/utils/docstring_utils.py index bba712b1e5202..c2c41bbecf5ac 100644 --- a/mlflow/utils/docstring_utils.py +++ b/mlflow/utils/docstring_utils.py @@ -191,7 +191,7 @@ class that describes the model's inputs and outputs. If not specified but an based on the supplied input example and model. To disable automatic signature inference when providing an input example, set ``signature`` to ``False``. To manually infer a model signature, call -:py:func:`infer_signature() ` on datasets +:py:func:`infer_signature() ` on datasets with valid model inputs, such as a training dataset with the target column omitted, and valid model outputs, like model predictions made on the training dataset, for example: @@ -209,10 +209,10 @@ class that describes the model's inputs and outputs. 
If not specified but an as a hint of what data to feed the model. It will be converted to a Pandas DataFrame and then serialized to json using the Pandas split-oriented format, or a numpy array where the example will be serialized to json -by converting it to a list. If input example is a tuple, then the first element -must be a valid model input, and the second element must be a valid params -dictionary that could be used for model inference. Bytes are base64-encoded. -When the ``signature`` parameter is ``None``, the input example is used to +by converting it to a list. If input example is a tuple, then the first element +must be a valid model input, and the second element must be a valid params +dictionary that could be used for model inference. Bytes are base64-encoded. +When the ``signature`` parameter is ``None``, the input example is used to infer a model signature. """, } diff --git a/pyproject.toml b/pyproject.toml index db7dd6df39029..8feaec189b042 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,6 +59,7 @@ select = [ "T20", "TID252", "TID251", + "W", ] force-exclude = true ignore = [ diff --git a/tests/pyfunc/test_scoring_server.py b/tests/pyfunc/test_scoring_server.py index 4aeb77a5f8574..0e2cd9025c780 100644 --- a/tests/pyfunc/test_scoring_server.py +++ b/tests/pyfunc/test_scoring_server.py @@ -418,7 +418,7 @@ def test_parse_json_input_split_oriented(): def test_records_oriented_json_to_df(): # test that datatype for "zip" column is not converted to "int64" jstr = """ - { + { "dataframe_records": [ {"zip":"95120","cost":10.45,"score":8}, {"zip":"95128","cost":23.0,"score":0}, @@ -444,9 +444,9 @@ def test_split_oriented_json_to_df(): { "dataframe_split": { "columns":["zip","cost","count"], - "index":[0,1,2], + "index":[0,1,2], "data":[["95120",10.45,-8],["95128",23.0,-1],["95128",12.1,1000]] - } + } } """ jstr, _ = pyfunc_scoring_server._split_data_and_params(jstr) diff --git a/tests/sklearn/test_sklearn_model_export.py b/tests/sklearn/test_sklearn_model_export.py index 44b68be160b99..98e0c8cf8b6c0 100644 --- a/tests/sklearn/test_sklearn_model_export.py +++ b/tests/sklearn/test_sklearn_model_export.py @@ -709,7 +709,7 @@ def test_sklearn_compatible_with_mlflow_2_4_0(sklearn_knn_model, tmp_path): - setuptools==56.0.0 - wheel==0.40.0 dependencies: - - -r requirements.txt + - -r requirements.txt """ ) tmp_path.joinpath("requirements.txt").write_text( diff --git a/tests/store/tracking/test_sqlalchemy_store.py b/tests/store/tracking/test_sqlalchemy_store.py index c88cf6b444213..e4226c126dc3e 100644 --- a/tests/store/tracking/test_sqlalchemy_store.py +++ b/tests/store/tracking/test_sqlalchemy_store.py @@ -2969,23 +2969,23 @@ def test_insert_large_text_in_dataset_table(self): conn.execute( sqlalchemy.sql.text( f""" - INSERT INTO datasets - (dataset_uuid, - experiment_id, - name, - digest, - dataset_source_type, - dataset_source, - dataset_schema, + INSERT INTO datasets + (dataset_uuid, + experiment_id, + name, + digest, + dataset_source_type, + dataset_source, + dataset_schema, dataset_profile) - VALUES - ('test_uuid', - 0, - 'test_name', - 'test_digest', - 'test_source_type', + VALUES + ('test_uuid', + 0, + 'test_name', + 'test_digest', + 'test_source_type', '{dataset_source}', ' - test_schema', + test_schema', '{dataset_profile}') """ ) From 02cda2aebae2708ea980c2edfcf08e8a33039b36 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 17 Oct 2023 07:59:27 -0700 Subject: [PATCH 006/101] Add timeout to openai (#9960) 
Signed-off-by: Prithvi Kannan --- mlflow/metrics/genai/genai_metric.py | 12 +++++++----- mlflow/metrics/genai/model_utils.py | 7 ++++--- tests/metrics/genai/test_model_utils.py | 11 +++++++---- 3 files changed, 18 insertions(+), 12 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index d525e06c005b0..bf84ad4ef2458 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -89,7 +89,7 @@ def make_genai_metric( aggregations: Optional[List[str]] = None, greater_is_better: bool = True, max_workers: int = 10, - judge_request_timeout: int = 15, + judge_request_timeout: int = 60, ) -> EvaluationMetric: """ Create a genai metric used to evaluate LLM using LLM as a judge in MLflow. @@ -111,7 +111,7 @@ def make_genai_metric( :param max_workers: (Optional) The maximum number of workers to use for judge scoring. Defaults to 10 workers. :param judge_request_timeout: (Optional) The timeout in seconds for each judge scoring request. - Defaults to 15 seconds. + Defaults to 60 seconds. :return: A metric object. @@ -167,9 +167,9 @@ def make_genai_metric( ), examples=[example], version="v1", - model="gateway:/gpt4", + model="openai:/gpt-3.5-turbo-16k", grading_context_columns=["ground_truth"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, aggregations=["mean", "variance", "p90"], greater_is_better=True, ) @@ -246,7 +246,9 @@ def score_model_on_one_payload( **eval_parameters, } try: - raw_result = model_utils.score_model_on_payload(eval_model, payload) + raw_result = model_utils.score_model_on_payload( + eval_model, payload, judge_request_timeout + ) return _extract_score_and_justification(raw_result) except Exception as e: _logger.info(f"Failed to score model on payload. 
Error: {e!r}") diff --git a/mlflow/metrics/genai/model_utils.py b/mlflow/metrics/genai/model_utils.py index 102cac0b8fc7b..5ee4edc845c57 100644 --- a/mlflow/metrics/genai/model_utils.py +++ b/mlflow/metrics/genai/model_utils.py @@ -12,13 +12,13 @@ # TODO: improve this name -def score_model_on_payload(model_uri, payload): +def score_model_on_payload(model_uri, payload, timeout): """Call the model identified by the given uri with the given payload.""" prefix, suffix = _parse_model_uri(model_uri) if prefix == "openai": - return _call_openai_api(suffix, payload) + return _call_openai_api(suffix, payload, timeout) elif prefix == "gateway": return _call_gateway_api(suffix, payload) elif prefix in ("model", "runs"): @@ -43,7 +43,7 @@ def _parse_model_uri(model_uri): return scheme, path -def _call_openai_api(openai_uri, payload): +def _call_openai_api(openai_uri, payload, timeout): """Wrapper around the OpenAI API to make it compatible with the MLflow Gateway API.""" from mlflow.gateway.config import RouteConfig from mlflow.gateway.providers.openai import OpenAIProvider @@ -72,6 +72,7 @@ def _call_openai_api(openai_uri, payload): url=append_to_uri_path(openai_provider._request_base_url, "chat/completions"), headers=openai_provider._request_headers, json=openai_provider._add_model_to_payload_if_necessary(payload), + timeout=timeout, ).json() return json.loads(openai_provider._prepare_completion_response_payload(resp).json()) diff --git a/tests/metrics/genai/test_model_utils.py b/tests/metrics/genai/test_model_utils.py index 8611ada0310dd..c79fb06f1dad8 100644 --- a/tests/metrics/genai/test_model_utils.py +++ b/tests/metrics/genai/test_model_utils.py @@ -43,12 +43,12 @@ def test_parse_model_uri_throws_for_malformed(): def test_score_model_on_payload_throws_for_invalid(): with pytest.raises(MlflowException, match="Unknown model uri prefix"): - score_model_on_payload("myprovider:/gpt-3.5-turbo", {}) + score_model_on_payload("myprovider:/gpt-3.5-turbo", {}, 10) def test_score_model_openai_without_key(): with pytest.raises(MlflowException, match="OPENAI_API_KEY environment variable not set"): - score_model_on_payload("openai:/gpt-3.5-turbo", {}) + score_model_on_payload("openai:/gpt-3.5-turbo", {}, 10) def test_score_model_openai(set_envs): @@ -86,7 +86,9 @@ def json(self): } with mock.patch("requests.post", return_value=MockResponse(resp, 200)) as mock_post: - score_model_on_payload("openai:/gpt-3.5-turbo", {"prompt": "my prompt", "temperature": 0.1}) + score_model_on_payload( + "openai:/gpt-3.5-turbo", {"prompt": "my prompt", "temperature": 0.1}, 10 + ) mock_post.assert_called_once_with( url="https://api.openai.com/v1/chat/completions", headers={"Authorization": "Bearer test"}, @@ -95,6 +97,7 @@ def json(self): "temperature": 0.2, "messages": [{"role": "user", "content": "my prompt"}], }, + timeout=10, ) @@ -120,5 +123,5 @@ def test_score_model_gateway(): } with mock.patch("mlflow.gateway.query", return_value=expected_output): - response = score_model_on_payload("gateway:/my-route", {}) + response = score_model_on_payload("gateway:/my-route", {}, 10) assert response == expected_output From ee9df2ab44043cea91bf419d65ba13f091cb1132 Mon Sep 17 00:00:00 2001 From: Facundo Santiago Date: Tue, 17 Oct 2023 12:30:33 -0400 Subject: [PATCH 007/101] OpenAI inference params (#9909) Signed-off-by: Facundo Santiago Signed-off-by: harupy Co-authored-by: Harutaka Kawamura --- docs/source/models.rst | 2 +- .../openai/{pyfunc.py => chat_completions.py} | 39 ++++++++++++++ examples/openai/completions.py | 39 
++++++++++++++ examples/openai/embeddings.py | 35 ++++++++++++ mlflow/openai/__init__.py | 33 +++++++----- mlflow/openai/utils.py | 16 ++++++ tests/openai/test_openai_model_export.py | 54 +++++++++++++++++++ 7 files changed, 204 insertions(+), 14 deletions(-) rename examples/openai/{pyfunc.py => chat_completions.py} (77%) diff --git a/docs/source/models.rst b/docs/source/models.rst index 45201a21d852e..cfa90ec197563 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -2551,7 +2551,7 @@ Model with the ``openai`` flavor as a dictionary of the model's attributes. Example: -.. literalinclude:: ../../examples/openai/pyfunc.py +.. literalinclude:: ../../examples/openai/chat_completions.py :language: python diff --git a/examples/openai/pyfunc.py b/examples/openai/chat_completions.py similarity index 77% rename from examples/openai/pyfunc.py rename to examples/openai/chat_completions.py index 3e65c5315fe7f..6482725bdc1cb 100644 --- a/examples/openai/pyfunc.py +++ b/examples/openai/chat_completions.py @@ -5,6 +5,8 @@ import pandas as pd import mlflow +from mlflow.models.signature import ModelSignature +from mlflow.types.schema import ColSpec, ParamSchema, ParamSpec, Schema logging.getLogger("mlflow").setLevel(logging.ERROR) @@ -159,3 +161,40 @@ ] model = mlflow.pyfunc.load_model(model_info.model_uri) print(model.predict(list_of_strings)) + + +print( + """ +# ****************************************************************************** +# Inference parameters with chat completions +# ****************************************************************************** +""" +) +with mlflow.start_run(): + model_info = mlflow.openai.log_model( + model="gpt-3.5-turbo", + task=openai.ChatCompletion, + artifact_path="model", + messages=[{"role": "user", "content": "Tell me a joke about {animal}."}], + signature=ModelSignature( + inputs=Schema([ColSpec(type="string", name=None)]), + outputs=Schema([ColSpec(type="string", name=None)]), + params=ParamSchema( + [ + ParamSpec(name="temperature", default=0, dtype="float"), + ] + ), + ), + ) + + +model = mlflow.pyfunc.load_model(model_info.model_uri) +df = pd.DataFrame( + { + "animal": [ + "cats", + "dogs", + ] + } +) +print(model.predict(df, params={"temperature": 1})) diff --git a/examples/openai/completions.py b/examples/openai/completions.py index c0b5128bb40f9..5cf61bda40234 100644 --- a/examples/openai/completions.py +++ b/examples/openai/completions.py @@ -3,9 +3,18 @@ import openai import mlflow +from mlflow.models.signature import ModelSignature +from mlflow.types.schema import ColSpec, ParamSchema, ParamSpec, Schema assert "OPENAI_API_KEY" in os.environ, " OPENAI_API_KEY environment variable must be set" +print( + """ +# ****************************************************************************** +# Completions indicating prompt template +# ****************************************************************************** +""" +) with mlflow.start_run(): model_info = mlflow.openai.log_model( @@ -17,3 +26,33 @@ model = mlflow.pyfunc.load_model(model_info.model_uri) print(model.predict(["I believe in a better world"])) + + +print( + """ +# ****************************************************************************** +# Completions using inference parameters +# ****************************************************************************** +""" +) +with mlflow.start_run(): + model_info = mlflow.openai.log_model( + model="text-davinci-002", + task=openai.Completion, + artifact_path="model", + prompt="Clasify the following tweet's sentiment: 
'{tweet}'.", + signature=ModelSignature( + inputs=Schema([ColSpec(type="string", name=None)]), + outputs=Schema([ColSpec(type="string", name=None)]), + params=ParamSchema( + [ + ParamSpec(name="max_tokens", default=16, dtype="long"), + ParamSpec(name="temperature", default=0, dtype="float"), + ParamSpec(name="best_of", default=1, dtype="long"), + ] + ), + ), + ) + +model = mlflow.pyfunc.load_model(model_info.model_uri) +print(model.predict(["I believe in a better world"], params={"temperature": 1, "best_of": 5})) diff --git a/examples/openai/embeddings.py b/examples/openai/embeddings.py index 63c3090392524..9020124a0d18c 100644 --- a/examples/openai/embeddings.py +++ b/examples/openai/embeddings.py @@ -1,12 +1,23 @@ import os +import numpy as np import openai import mlflow +from mlflow.models.signature import ModelSignature +from mlflow.types.schema import ColSpec, ParamSchema, ParamSpec, Schema, TensorSpec assert "OPENAI_API_KEY" in os.environ, " OPENAI_API_KEY environment variable must be set" +print( + """ +# ****************************************************************************** +# Text embeddings +# ****************************************************************************** +""" +) + with mlflow.start_run(): model_info = mlflow.openai.log_model( model="text-embedding-ada-002", @@ -16,3 +27,27 @@ model = mlflow.pyfunc.load_model(model_info.model_uri) print(model.predict(["hello", "world"])) + + +print( + """ +# ****************************************************************************** +# Text embeddings with batch_size parameter +# ****************************************************************************** +""" +) + +with mlflow.start_run(): + mlflow.openai.log_model( + model="text-embedding-ada-002", + task=openai.Embedding, + artifact_path="model", + signature=ModelSignature( + inputs=Schema([ColSpec(type="string", name=None)]), + outputs=Schema([TensorSpec(type=np.dtype("float64"), shape=(-1,))]), + params=ParamSchema([ParamSpec(name="batch_size", dtype="long", default=1024)]), + ), + ) + +model = mlflow.pyfunc.load_model(model_info.model_uri) +print(model.predict(["hello", "world"], params={"batch_size": 16})) diff --git a/mlflow/openai/__init__.py b/mlflow/openai/__init__.py index 666679e512787..f8c1621e8800c 100644 --- a/mlflow/openai/__init__.py +++ b/mlflow/openai/__init__.py @@ -47,7 +47,7 @@ from mlflow.models import Model, ModelInputExample, ModelSignature from mlflow.models.model import MLMODEL_FILE_NAME from mlflow.models.utils import _save_example -from mlflow.openai.utils import _OAITokenHolder +from mlflow.openai.utils import _OAITokenHolder, _validate_model_params from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.tracking._model_registry import DEFAULT_AWAIT_MAX_SLEEP_SECONDS from mlflow.tracking.artifact_utils import _download_artifact_from_uri @@ -358,6 +358,10 @@ def save_model( mlflow_model = Model() if signature is not None: + if signature.params: + _validate_model_params( + task, kwargs, {p.name: p.default for p in signature.params.params} + ) mlflow_model.signature = signature elif task == "chat.completions": messages = kwargs.get("messages", []) @@ -683,13 +687,14 @@ def get_params_list(self, data): else: return data[self.formater.variables].to_dict(orient="records") - def _predict_chat(self, data): + def _predict_chat(self, data, params): import openai from mlflow.openai.api_request_parallel_processor import process_api_requests + _validate_model_params(self.task, self.model, params) messages_list = 
self.format_completions(self.get_params_list(data)) - requests = [{**self.model, "messages": messages} for messages in messages_list] + requests = [{**self.model, **params, "messages": messages} for messages in messages_list] results = process_api_requests( requests, openai.ChatCompletion, @@ -699,18 +704,20 @@ def _predict_chat(self, data): ) return [r["choices"][0]["message"]["content"] for r in results] - def _predict_completions(self, data): + def _predict_completions(self, data, params): import openai from mlflow.openai.api_request_parallel_processor import process_api_requests + _validate_model_params(self.task, self.model, params) prompts_list = self.format_completions(self.get_params_list(data)) - batch_size = self.api_config.batch_size + batch_size = params.pop("batch_size", self.api_config.batch_size) _logger.debug(f"Requests are being batched by {batch_size} samples.") requests = [ { **self.model, + **params, "prompt": prompts_list[i : i + batch_size], } for i in range(0, len(prompts_list), batch_size) @@ -724,12 +731,13 @@ def _predict_completions(self, data): ) return [row["text"] for batch in results for row in batch["choices"]] - def _predict_embeddings(self, data): + def _predict_embeddings(self, data, params): import openai from mlflow.openai.api_request_parallel_processor import process_api_requests - batch_size = self.api_config.batch_size + _validate_model_params(self.task, self.model, params) + batch_size = params.pop("batch_size", self.api_config.batch_size) _logger.debug(f"Requests are being batched by {batch_size} samples.") first_string_column = _first_string_column(data) @@ -737,6 +745,7 @@ def _predict_embeddings(self, data): requests = [ { **self.model, + **params, "input": texts[i : i + batch_size], } for i in range(0, len(texts), batch_size) @@ -750,9 +759,7 @@ def _predict_embeddings(self, data): ) return [row["embedding"] for batch in results for row in batch["data"]] - def predict( - self, data, params: Optional[Dict[str, Any]] = None # pylint: disable=unused-argument - ): + def predict(self, data, params: Optional[Dict[str, Any]] = None): """ :param data: Model input data. :param params: Additional parameters to pass to the model for inference. @@ -765,11 +772,11 @@ def predict( self.api_token.validate() if self.task == "chat.completions": - return self._predict_chat(data) + return self._predict_chat(data, params or {}) elif self.task == "completions": - return self._predict_completions(data) + return self._predict_completions(data, params or {}) elif self.task == "embeddings": - return self._predict_embeddings(data) + return self._predict_embeddings(data, params or {}) class _TestOpenAIWrapper(_OpenAIWrapper): diff --git a/mlflow/openai/utils.py b/mlflow/openai/utils.py index 6fdbd2ca3f80b..fc4c2441b6b4d 100644 --- a/mlflow/openai/utils.py +++ b/mlflow/openai/utils.py @@ -135,6 +135,22 @@ def request(*args, **kwargs): return _mock_request(new=request) +def _validate_model_params(task, model, params): + if not params: + return + + if any(key in model for key in params): + raise mlflow.MlflowException.invalid_parameter_value( + f"Providing any of {list(model.keys())} as parameters in the signature is not " + "allowed because they were indicated as part of the OpenAI model. 
Either remove " + "the argument when logging the model or remove the parameter from the signature.", + ) + if "batch_size" in params and task == "chat.completions": + raise mlflow.MlflowException.invalid_parameter_value( + "Parameter `batch_size` is not supported for task `chat.completions`" + ) + + class _OAITokenHolder: def __init__(self, api_type): import openai diff --git a/tests/openai/test_openai_model_export.py b/tests/openai/test_openai_model_export.py index 6e1a776097f44..ec70920b85264 100644 --- a/tests/openai/test_openai_model_export.py +++ b/tests/openai/test_openai_model_export.py @@ -2,6 +2,7 @@ import json from unittest import mock +import numpy as np import openai import openai.error import pandas as pd @@ -11,11 +12,13 @@ import mlflow import mlflow.pyfunc.scoring_server as pyfunc_scoring_server +from mlflow.models.signature import ModelSignature from mlflow.openai.utils import ( _mock_chat_completion_response, _mock_models_retrieve_response, _mock_request, ) +from mlflow.types.schema import ColSpec, ParamSchema, ParamSpec, Schema, TensorSpec from tests.helper_functions import pyfunc_serve_and_score_model @@ -560,6 +563,18 @@ def test_embeddings(tmp_path): assert preds == [[0.0]] * 100 +def test_embeddings_batch_size_azure(tmp_path, monkeypatch): + monkeypatch.setenv("OPENAI_API_TYPE", "azure") + mlflow.openai.save_model( + model="text-embedding-ada-002", + task=openai.Embedding, + path=tmp_path, + ) + model = mlflow.pyfunc.load_model(tmp_path) + + assert model._model_impl.api_config.batch_size == 16 + + def test_embeddings_pyfunc_server_and_score(tmp_path): mlflow.openai.save_model( model="text-embedding-ada-002", @@ -594,3 +609,42 @@ def test_spark_udf_embeddings(tmp_path, spark): ) df = df.withColumn("z", udf("x")).toPandas() assert df["z"].tolist() == [[0.0], [0.0]] + + +def test_inference_params(tmp_path): + mlflow.openai.save_model( + model="text-embedding-ada-002", + task=openai.Embedding, + path=tmp_path, + signature=ModelSignature( + inputs=Schema([ColSpec(type="string", name=None)]), + outputs=Schema([TensorSpec(type=np.dtype("float64"), shape=(-1,))]), + params=ParamSchema([ParamSpec(name="batch_size", dtype="long", default=16)]), + ), + ) + + model_info = mlflow.models.Model.load(tmp_path) + assert ( + len([p for p in model_info.signature.params if p.name == "batch_size" and p.default == 16]) + == 1 + ) + + model = mlflow.pyfunc.load_model(tmp_path) + data = pd.DataFrame({"text": ["a", "b"]}) + preds = model.predict(data, params={"batch_size": 5}) + assert preds == [[0.0], [0.0]] + + +def test_inference_params_overlap(tmp_path): + with pytest.raises(mlflow.MlflowException, match=r"any of \['prefix'\] as parameters"): + mlflow.openai.save_model( + model="text-davinci-003", + task=openai.Completion, + path=tmp_path, + prefix="Classify the following text's sentiment:", + signature=ModelSignature( + inputs=Schema([ColSpec(type="string", name=None)]), + outputs=Schema([ColSpec(type="string", name=None)]), + params=ParamSchema([ParamSpec(name="prefix", default=None, dtype="string")]), + ), + ) From 958b269031fd1c3cb28a6de9dd961eae2c6b4d96 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 17 Oct 2023 09:52:11 -0700 Subject: [PATCH 008/101] Token count latency docs (#9974) Signed-off-by: Prithvi Kannan --- docs/source/python_api/mlflow.metrics.rst | 8 +++++++- mlflow/models/evaluation/base.py | 15 +++++++-------- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git 
a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 5503fd2cf85c5..fe90a9916edec 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -107,6 +107,12 @@ We provide the following builtin :py:class:`EvaluationMetric ` using the :py:func:`make_metric ` factory function .. autofunction:: mlflow.metrics.make_metric @@ -131,4 +137,4 @@ When using LLM based :py:class:`EvaluationMetric Date: Tue, 17 Oct 2023 09:57:25 -0700 Subject: [PATCH 009/101] Optional configure timeout for precanned metrics (#9972) Signed-off-by: Prithvi Kannan --- mlflow/metrics/genai/metric_definitions.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index 61e013fda381b..b7561d8fa2032 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -15,6 +15,7 @@ def correctness( model: Optional[str] = None, metric_version: Optional[str] = None, examples: Optional[List[EvaluationExample]] = None, + judge_request_timeout=60, ) -> EvaluationMetric: """ This function will create a genai metric used to evaluate the correctness of an LLM using the @@ -33,6 +34,8 @@ def correctness( :param examples: (Optional) Provide a list of examples to help the judge model evaluate the correctness. It is highly recommended to add examples to be used as a reference to evaluate the new results. + :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. + Defaults to 60 seconds. :return: A metric object """ if metric_version is None: @@ -68,6 +71,7 @@ def correctness( parameters=correctness_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, + judge_request_timeout=judge_request_timeout, ) @@ -76,6 +80,7 @@ def strict_correctness( model: Optional[str] = None, metric_version: Optional[str] = None, examples: Optional[List[EvaluationExample]] = None, + judge_request_timeout=60, ) -> EvaluationMetric: """ This function will create a genai metric used to evaluate the strict correctness of an LLM @@ -98,6 +103,8 @@ def strict_correctness( :param examples: (Optional) Provide a list of examples to help the judge model evaluate the strict correctness. It is highly recommended to add examples to be used as a reference to evaluate the new results. + :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. + Defaults to 60 seconds. :return: A metric object """ if metric_version is None: @@ -133,6 +140,7 @@ def strict_correctness( parameters=strict_correctness_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, + judge_request_timeout=judge_request_timeout, ) @@ -141,6 +149,7 @@ def relevance( model: Optional[str] = None, metric_version: Optional[str] = None, examples: Optional[List[EvaluationExample]] = None, + judge_request_timeout=60, ) -> EvaluationMetric: """ This function will create a genai metric used to evaluate the relevance of an LLM using the @@ -159,6 +168,8 @@ def relevance( :param examples: (Optional) Provide a list of examples to help the judge model evaluate the relevance. It is highly recommended to add examples to be used as a reference to evaluate the new results. + :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. + Defaults to 60 seconds. 
:return: A metric object """ if metric_version is None: @@ -194,4 +205,5 @@ def relevance( parameters=relevance_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, + judge_request_timeout=judge_request_timeout, ) From 5f08435548c839b1589ba8dd18db735cd74e98f3 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 17 Oct 2023 10:09:45 -0700 Subject: [PATCH 010/101] Advise to use temperature 0 for judge (#9975) Signed-off-by: Prithvi Kannan --- mlflow/metrics/genai/genai_metric.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index bf84ad4ef2458..b9cf842720148 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -104,7 +104,9 @@ def make_genai_metric( the metric. These grading_context_columns are used by the LLM as a judge as additional information to compute the metric. The columns are extracted from the input dataset or output predictions based on col_mapping in evaluator_config. - :param parameters: (Optional) Parameters for the llm used to compute the metric. + :param parameters: (Optional) Parameters for the LLM used to compute the metric. By default, we + set the temperature to 0.0, max_tokens to 200, and top_p to 1.0. We recommend + setting the temperature to 0.0 for the LLM used as a judge to ensure consistent results. :param aggregations: (Optional) The list of options to aggregate the scores. Currently supported options are: min, max, mean, median, variance, p90. :param greater_is_better: (Optional) Whether the metric is better when it is greater. From 2158f3ce96ab4e0e41de2e833cae1c962a14c684 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 17 Oct 2023 10:11:21 -0700 Subject: [PATCH 011/101] Add legal warning about third party LLM judge (#9973) Signed-off-by: Prithvi Kannan --- docs/source/python_api/mlflow.metrics.rst | 2 +- mlflow/metrics/genai/genai_metric.py | 5 ++++- mlflow/metrics/genai/metric_definitions.py | 12 +++++++++--- 3 files changed, 14 insertions(+), 5 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index fe90a9916edec..cc086cfe8105f 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -117,7 +117,7 @@ Users create their own :py:class:`EvaluationMetric ` for evaluating text models. These metrics use an LLM to evaluate the quality of a model's output text. The following factory functions help you customize the intelligent metric to your use case. +We provide the following pre-canned "intelligent" :py:class:`EvaluationMetric ` for evaluating text models. These metrics use an LLM to evaluate the quality of a model's output text. Note that your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. The following factory functions help you customize the intelligent metric to your use case. .. autofunction:: mlflow.metrics.correctness diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index b9cf842720148..2d59d473f8d1e 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -99,7 +99,10 @@ def make_genai_metric( :param grading_prompt: Grading criteria of the metric. :param examples: (Optional) Examples of the metric. 
:param version: (Optional) Version of the metric. Currently supported versions are: v1. - :param model: (Optional) Model uri of the metric. + :param model: (Optional) Model uri of the of an openai or gateway judge model in the format of + "openai:/gpt-4" or "gateway:/my-route". Defaults to + "openai:/gpt-3.5-turbo-16k". Your use of a third party LLM service (e.g., OpenAI) for + evaluation may be subject to and governed by the LLM service's terms of use. :param grading_context_columns: (Optional) grading_context_columns required to compute the metric. These grading_context_columns are used by the LLM as a judge as additional information to compute the metric. The columns are extracted from the input dataset or diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index b7561d8fa2032..7835fb43002d2 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -28,7 +28,9 @@ def correctness( An MlflowException will be raised if the specified version for this metric does not exist. - :param model: (Optional) The model that will be used to evaluate this metric. Defaults to GPT-4. + :param model: (Optional) The model that will be used to evaluate this metric. Defaults to + gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the correctness metric to use. Defaults to the latest version. :param examples: (Optional) Provide a list of examples to help the judge model evaluate the @@ -97,7 +99,9 @@ def strict_correctness( An MlflowException will be raised if the specified version for this metric does not exist. - :param model: (Optional) The model that will be used to evaluate this metric. Defaults to GPT-4. + :param model: (Optional) The model that will be used to evaluate this metric. Defaults to + gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the strict correctness metric to use. Defaults to the latest version. :param examples: (Optional) Provide a list of examples to help the judge model evaluate the @@ -162,7 +166,9 @@ def relevance( An MlflowException will be raised if the specified version for this metric does not exist. - :param model: (Optional) The model that will be used to evaluate this metric. Defaults to GPT-4. + :param model: (Optional) The model that will be used to evaluate this metric. Defaults to + gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the relevance metric to use. Defaults to the latest version. 
:param examples: (Optional) Provide a list of examples to help the judge model evaluate the From adce81e27ac7f06a807f53d14ff7a4c8aa23bb0f Mon Sep 17 00:00:00 2001 From: Liang Zhang Date: Tue, 17 Oct 2023 13:57:08 -0700 Subject: [PATCH 012/101] Fix `targets` and `predictions` error message in mlflow evaluate (#9958) Signed-off-by: Liang Zhang --- mlflow/models/evaluation/base.py | 48 +++++----- tests/evaluate/test_evaluation.py | 146 ++++++++++++++++++------------ 2 files changed, 115 insertions(+), 79 deletions(-) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 0f6a5608e13ba..73a29a82beb45 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1641,14 +1641,34 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): _EnvManager.validate(env_manager) + # If Dataset is provided, the targets and predictions can only be specified by the Dataset, + # not the targets and predictions parameters of the mlflow.evaluate() API. + if isinstance(data, Dataset) and targets is not None: + raise MlflowException( + message="The top-level targets parameter should not be specified since a Dataset " + "is used. Please only specify the targets column name in the Dataset. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`. " + "Meanwhile, please specify `mlflow.evaluate(..., targets=None, ...)`.", + error_code=INVALID_PARAMETER_VALUE, + ) + if isinstance(data, Dataset) and predictions is not None: + raise MlflowException( + message="The top-level predictions parameter should not be specified since a Dataset " + "is used. Please only specify the predictions column name in the Dataset. For example:" + " `data = mlflow.data.from_pandas(df=X.assign(y=y), predictions='y')`" + "Meanwhile, please specify `mlflow.evaluate(..., predictions=None, ...)`.", + error_code=INVALID_PARAMETER_VALUE, + ) + if model_type in [_ModelType.REGRESSOR, _ModelType.CLASSIFIER]: if isinstance(data, Dataset): if getattr(data, "targets", None) is not None: targets = data.targets else: raise MlflowException( - message="The targets argument is required when data is a Dataset and does not " - "define targets.", + message="The targets column name must be specified in the provided Dataset " + f"for {model_type} models. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`", error_code=INVALID_PARAMETER_VALUE, ) else: @@ -1693,29 +1713,13 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): "parameter when model=None.", error_code=INVALID_PARAMETER_VALUE, ) - if predictions not in data.columns: - raise MlflowException( - message=f"The specified predictions column '{predictions}' is not " - "found in the specified data.", - error_code=INVALID_PARAMETER_VALUE, - ) elif isinstance(data, mlflow.data.pandas_dataset.PandasDataset): - # If data is a mlflow PandasDataset with predictions specified - # check that exact one predictions column is specified - if data.predictions is not None: - if predictions is not None and predictions != data.predictions: - raise MlflowException( - message="The predictions parameter must be None or the same as " - "data.predictions when data.predictions is specified. 
Found " - f"predictions='{predictions}', data.predictions='{data.predictions}'.", - error_code=INVALID_PARAMETER_VALUE, - ) - else: # predictions is None or predictions == data.predictions - pass # OK: exact one predictions column is specified - else: + # If data is a mlflow PandasDataset, data.predictions must be specified + if data.predictions is None: raise MlflowException( message="The predictions parameter must be specified with the provided " - "PandasDataset when model=None.", + "PandasDataset when model=None. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), predictions='y')`", error_code=INVALID_PARAMETER_VALUE, ) else: diff --git a/tests/evaluate/test_evaluation.py b/tests/evaluate/test_evaluation.py index 88fca6e18c16d..8b61f979fe433 100644 --- a/tests/evaluate/test_evaluation.py +++ b/tests/evaluate/test_evaluation.py @@ -1,6 +1,7 @@ import io import json import os +import re import signal import uuid from collections import namedtuple @@ -605,7 +606,6 @@ def test_pandas_df_regressor_evaluation_mlflow_dataset_with_metric_prefix( eval_result = evaluate( linear_regressor_model_uri, data=mlflow_df, - targets="y", model_type="regressor", evaluators=["default"], evaluator_config={ @@ -633,7 +633,6 @@ def test_pandas_df_regressor_evaluation_mlflow_dataset(linear_regressor_model_ur eval_result = evaluate( linear_regressor_model_uri, data=mlflow_df, - targets="y", model_type="regressor", evaluators=["default"], ) @@ -671,25 +670,6 @@ def test_pandas_df_regressor_evaluation_mlflow_dataset_with_targets_from_dataset assert len(datasets[0].tags) == 0 -def test_pandas_df_regressor_evaluation_mlflow_dataset_without_targets(linear_regressor_model_uri): - data = sklearn.datasets.load_diabetes() - df = pd.DataFrame(data.data, columns=data.feature_names) - df["y"] = data.target - mlflow_df = from_pandas(df=df, source="my_src") - with mlflow.start_run(): - with pytest.raises( - MlflowException, - match="The targets argument is required when data is a Dataset and does not define " - "targets.", - ): - evaluate( - linear_regressor_model_uri, - data=mlflow_df, - model_type="regressor", - evaluators=["default"], - ) - - def test_dataset_name(): X, y = get_iris() d1 = EvaluationDataset(data=X, targets=y, name="a1") @@ -1317,6 +1297,79 @@ def test_evaluate_lightgbm_regressor(): assert "root_mean_squared_error" in run.data.metrics +def test_evaluate_with_targets_error_handling(): + import lightgbm as lgb + + X, y = sklearn.datasets.load_diabetes(return_X_y=True, as_frame=True) + X = X[::5] + y = y[::5] + lgb_data = lgb.Dataset(X, label=y) + model = lgb.train({"objective": "regression"}, lgb_data, num_boost_round=5) + ERROR_TYPE_1 = ( + "The top-level targets parameter should not be specified since a Dataset " + "is used. Please only specify the targets column name in the Dataset. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`. " + "Meanwhile, please specify `mlflow.evaluate(..., targets=None, ...)`." + ) + ERROR_TYPE_2 = ( + "The targets column name must be specified in the provided Dataset " + "for regressor models. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), targets='y')`" + ) + ERROR_TYPE_3 = "The targets argument must be specified for regressor models." 
+ + pandas_dataset_no_targets = X + mlflow_dataset_no_targets = mlflow.data.from_pandas(df=X.assign(y=y)) + mlflow_dataset_with_targets = mlflow.data.from_pandas(df=X.assign(y=y), targets="y") + + with mlflow.start_run(): + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_1)): + mlflow.evaluate( + model=model, + data=mlflow_dataset_with_targets, + model_type="regressor", + targets="y", + ) + + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_1)): + mlflow.evaluate( + model=model, + data=mlflow_dataset_no_targets, + model_type="regressor", + targets="y", + ) + + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_1)): + mlflow.evaluate( + model=model, + data=mlflow_dataset_with_targets, + model_type="question-answering", + targets="y", + ) + + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_1)): + mlflow.evaluate( + model=model, + data=mlflow_dataset_no_targets, + model_type="question-answering", + targets="y", + ) + + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_2)): + mlflow.evaluate( + model=model, + data=mlflow_dataset_no_targets, + model_type="regressor", + ) + + with pytest.raises(MlflowException, match=re.escape(ERROR_TYPE_3)): + mlflow.evaluate( + model=model, + data=pandas_dataset_no_targets, + model_type="regressor", + ) + + def test_evaluate_with_function_input_single_output(): import lightgbm as lgb @@ -1403,20 +1456,6 @@ def test_evaluate_with_static_mlflow_dataset_input(): assert "mean_squared_error" in run.data.metrics assert "root_mean_squared_error" in run.data.metrics - # redundent predictions parameter is allowed - with mlflow.start_run() as run: - mlflow.evaluate( - data=data, - model_type="regressor", - targets="y", - predictions="model_output", # same as data.predictions - ) - - run = mlflow.get_run(run.info.run_id) - assert "mean_absolute_error" in run.data.metrics - assert "mean_squared_error" in run.data.metrics - assert "root_mean_squared_error" in run.data.metrics - def test_evaluate_with_static_spark_dataset_unsupported(): data = sklearn.datasets.load_diabetes() @@ -1491,8 +1530,8 @@ def test_evaluate_with_static_dataset_error_handling_pandas_dataframe(): with pytest.raises( MlflowException, - match="The specified predictions column 'prediction' is not " - "found in the specified data.", + match="The specified pandas DataFrame does not contain the specified predictions" + " column 'prediction'.", ): mlflow.evaluate( data=X.assign(y=y, model_output=y), @@ -1506,34 +1545,27 @@ def test_evaluate_with_static_dataset_error_handling_pandas_dataset(): X, y = sklearn.datasets.load_diabetes(return_X_y=True, as_frame=True) X = X[::5] y = y[::5] - data = mlflow.data.from_pandas( + dataset_with_predictions = mlflow.data.from_pandas( df=X.assign(y=y, model_output=y), targets="y", predictions="model_output" ) + dataset_no_predictions = mlflow.data.from_pandas(df=X.assign(y=y, model_output=y), targets="y") + ERROR_MESSAGE = ( + "The top-level predictions parameter should not be specified since a Dataset is " + "used. Please only specify the predictions column name in the Dataset. For example: " + "`data = mlflow.data.from_pandas(df=X.assign(y=y), predictions='y')`" + "Meanwhile, please specify `mlflow.evaluate(..., predictions=None, ...)`." + ) with mlflow.start_run(): - with pytest.raises( - MlflowException, - match="The predictions parameter must be None or the same as " - "data.predictions when data.predictions is specified. 
Found predictions='y', " - "data.predictions='model_output'.", - ): + with pytest.raises(MlflowException, match=re.escape(ERROR_MESSAGE)): mlflow.evaluate( - data=data, + data=dataset_with_predictions, model_type="regressor", - targets="y", - predictions="y", # conflict with data.predictions + predictions="model_output", ) - # data.predictions cannot be missing - data = mlflow.data.from_pandas(df=X.assign(y=y, model_output=y), targets="y") - with mlflow.start_run(): - with pytest.raises( - MlflowException, - match="The predictions parameter must be specified with the " - "provided PandasDataset when model=None.", - ): + with pytest.raises(MlflowException, match=re.escape(ERROR_MESSAGE)): mlflow.evaluate( - data=data, + data=dataset_no_predictions, model_type="regressor", - targets="y", predictions="model_output", ) From b5e43feaadcd0c073f5c5261dba50f91d49e9ac4 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Tue, 17 Oct 2023 16:07:32 -0700 Subject: [PATCH 013/101] fix error message columns (#9979) Signed-off-by: Ann Zhang --- mlflow/models/evaluation/default_evaluator.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 95658ec51f75b..2e2b5b37626fe 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1162,7 +1162,10 @@ def _get_args_for_metrics(self, extra_metric, eval_df): output_column_name = self.evaluator_config.get( _Y_PREDICTED_OUTPUT_COLUMN_NAME, "output" ) - output_columns = list(self.other_output_columns.columns) + if self.other_output_columns: + output_columns = list(self.other_output_columns.columns) + else: + output_columns = [] input_columns = list(input_df.columns) raise MlflowException( "Error: Metric Calculation Failed\n" From a4db4ee826765f0365944f83124af8840c72f4d8 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 17 Oct 2023 16:26:47 -0700 Subject: [PATCH 014/101] Change temperature in test to 0.0 (#9978) Signed-off-by: Prithvi Kannan --- tests/metrics/genai/prompts/test_v1.py | 4 ++-- tests/metrics/genai/test_genai_metrics.py | 10 +++++----- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/metrics/genai/prompts/test_v1.py b/tests/metrics/genai/prompts/test_v1.py index 4605688e3fd9e..68f5ff7738c60 100644 --- a/tests/metrics/genai/prompts/test_v1.py +++ b/tests/metrics/genai/prompts/test_v1.py @@ -41,11 +41,11 @@ def test_evaluation_model_output(): ), ], model="gateway:/gpt-4", - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, ).to_dict() assert model1["model"] == "gateway:/gpt-4" - assert model1["parameters"] == {"temperature": 1.0} + assert model1["parameters"] == {"temperature": 0.0} grading_context = {"ground_truth": "This is an output"} args_string = "Additional information used by the model:\n" + "\n".join( diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 4d5db0ef08d76..fd472d9e473d0 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -179,7 +179,7 @@ def test_make_genai_metric_correct_response(): examples=[mlflow_example], model="gateway:/gpt-3.5-turbo", grading_context_columns=["targets"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, greater_is_better=True, aggregations=["mean", "variance", "p90"], ) @@ -278,7 +278,7 @@ def 
test_make_genai_metric_incorrect_response(): examples=[mlflow_example], model="gateway:/gpt-3.5-turbo", grading_context_columns=["targets"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, greater_is_better=True, aggregations=["mean", "variance", "p90"], ) @@ -312,7 +312,7 @@ def test_make_genai_metric_multiple(): examples=[mlflow_example], model="gateway:/gpt-3.5-turbo", grading_context_columns=["targets"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, greater_is_better=True, aggregations=["mean", "variance", "p90"], ) @@ -386,7 +386,7 @@ def test_make_genai_metric_failure(): examples=[example], model="model", grading_context_columns=["targets"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, greater_is_better=True, aggregations=["mean"], ) @@ -417,7 +417,7 @@ def test_make_genai_metric_failure(): examples=[example], model="openai:/gpt-3.5-turbo", grading_context_columns=["targets"], - parameters={"temperature": 1.0}, + parameters={"temperature": 0.0}, greater_is_better=True, aggregations=["random-fake"], ) From bf63048d03609177c00ba114fc25c13fb9fd3c6e Mon Sep 17 00:00:00 2001 From: Serena Ruan <82044803+serena-ruan@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:09:12 +0800 Subject: [PATCH 015/101] fix spark connect (#9986) Signed-off-by: Serena Ruan --- mlflow/pyfunc/__init__.py | 5 +++++ tests/pyfunc/test_spark_connect.py | 17 +++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/mlflow/pyfunc/__init__.py b/mlflow/pyfunc/__init__.py index 1c8ffd3102f6a..7462cb13ace7f 100644 --- a/mlflow/pyfunc/__init__.py +++ b/mlflow/pyfunc/__init__.py @@ -1473,6 +1473,8 @@ def _predict_row_batch(predict_fn, args): pandas.DataFrame if isinstance(result_type, SparkStructType) else pandas.Series ) + tracking_uri = mlflow.get_tracking_uri() + @pandas_udf(result_type) def udf( iterator: Iterator[Tuple[Union[pandas.Series, pandas.DataFrame], ...]] @@ -1492,6 +1494,9 @@ def udf( if mlflow_testing: _MLFLOW_TESTING.set(mlflow_testing) scoring_server_proc = None + # set tracking_uri inside udf so that with spark_connect + # we can load the model from correct path + mlflow.set_tracking_uri(tracking_uri) if env_manager != _EnvManager.LOCAL: if should_use_spark_to_broadcast_file: diff --git a/tests/pyfunc/test_spark_connect.py b/tests/pyfunc/test_spark_connect.py index a6defa70d12fd..feedfaddc24ba 100644 --- a/tests/pyfunc/test_spark_connect.py +++ b/tests/pyfunc/test_spark_connect.py @@ -41,3 +41,20 @@ def test_spark_udf_spark_connect_unsupported_env_manager(spark, tmp_path, env_ma match=f"Environment manager {env_manager!r} is not supported", ): mlflow.pyfunc.spark_udf(spark, str(tmp_path), env_manager=env_manager) + + +def test_spark_udf_spark_connect_with_model_logging(spark, tmp_path): + X, y = load_iris(return_X_y=True, as_frame=True) + model = LogisticRegression().fit(X, y) + + mlflow.set_tracking_uri(tmp_path.joinpath("mlruns").as_uri()) + mlflow.set_experiment("test") + with mlflow.start_run(): + signature = mlflow.models.infer_signature(X, y) + model_info = mlflow.sklearn.log_model(model, "model", signature=signature) + + udf = mlflow.pyfunc.spark_udf(spark, model_info.model_uri, env_manager="local") + X_test = X.head(5) + sdf = spark.createDataFrame(X_test) + preds = sdf.select(udf(*X_test.columns).alias("preds")).toPandas()["preds"] + np.testing.assert_array_almost_equal(preds, model.predict(X_test)) From 9443f81f9cdf9e846a4ced16b17cb9bae9b2ced4 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Wed, 18 Oct 2023 
17:37:26 +0900 Subject: [PATCH 016/101] Report memory usage in tests (#9988) Signed-off-by: harupy --- conftest.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/conftest.py b/conftest.py index 61ae4b720edd3..6f56d52f4953c 100644 --- a/conftest.py +++ b/conftest.py @@ -55,6 +55,22 @@ def pytest_runtest_setup(item): pytest.skip("use `--requires-ssh` to run this test") +@pytest.hookimpl(hookwrapper=True) +def pytest_report_teststatus(report, config): + outcome = yield + if report.when == "call": + try: + import psutil + except ImportError: + return + + (*rest, result) = outcome.get_result() + mem = psutil.virtual_memory() + used = mem.used / 1024**3 + total = mem.total / 1024**3 + outcome.force_result((*rest, f"{result} | mem {used:.1f}/{total:.1f} GB")) + + @pytest.hookimpl(hookwrapper=True) def pytest_ignore_collect(path, config): outcome = yield From 4e9c891288b7d27c00f828177737dbe140d9cae7 Mon Sep 17 00:00:00 2001 From: lightnessofbein <39873967+lightnessofbein@users.noreply.github.com> Date: Wed, 18 Oct 2023 14:50:23 +0200 Subject: [PATCH 017/101] changed pytest_report_teststatus to report disk during tests (#9994) Signed-off-by: Serhii Fedash Signed-off-by: harupy Co-authored-by: harupy --- conftest.py | 19 ++++++++++++++++--- tests/examples/test_examples.py | 10 +--------- tests/helper_functions.py | 5 ----- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/conftest.py b/conftest.py index 6f56d52f4953c..640680cb3dc2a 100644 --- a/conftest.py +++ b/conftest.py @@ -66,9 +66,22 @@ def pytest_report_teststatus(report, config): (*rest, result) = outcome.get_result() mem = psutil.virtual_memory() - used = mem.used / 1024**3 - total = mem.total / 1024**3 - outcome.force_result((*rest, f"{result} | mem {used:.1f}/{total:.1f} GB")) + mem_used = mem.used / 1024**3 + mem_total = mem.total / 1024**3 + + disk = psutil.disk_usage("/") + disk_used = disk.used / 1024**3 + disk_total = disk.total / 1024**3 + outcome.force_result( + ( + *rest, + ( + f"{result} | " + f"MEM {mem_used:.1f}/{mem_total:.1f} GB | " + f"DISK {disk_used:.1f}/{disk_total:.1f} GB" + ), + ) + ) @pytest.hookimpl(hookwrapper=True) diff --git a/tests/examples/test_examples.py b/tests/examples/test_examples.py index 18a95257309db..14f041a442621 100644 --- a/tests/examples/test_examples.py +++ b/tests/examples/test_examples.py @@ -11,7 +11,7 @@ from mlflow.utils import process from mlflow.utils.virtualenv import _get_mlflow_virtualenv_root -from tests.helper_functions import clear_hub_cache, get_free_disk_space_in_GiB +from tests.helper_functions import clear_hub_cache from tests.integration.utils import invoke_cli_runner EXAMPLES_DIR = "examples" @@ -28,14 +28,6 @@ def replace_mlflow_with_dev_version(yml_path: Path) -> None: yml_path.write_text(new_src) -@pytest.fixture(autouse=True) -def report_free_disk_space(capsys): - yield - - with capsys.disabled(): - sys.stdout.write(f" | Free disk space: {get_free_disk_space_in_GiB():.1f} GiB") - - @pytest.fixture(autouse=True) def clean_up_mlflow_virtual_environments(): yield diff --git a/tests/helper_functions.py b/tests/helper_functions.py index 79dd26f696e67..1de4d1b5ceba5 100644 --- a/tests/helper_functions.py +++ b/tests/helper_functions.py @@ -4,7 +4,6 @@ import numbers import os import random -import shutil import signal import socket import subprocess @@ -636,7 +635,3 @@ def clear_hub_cache(): except ImportError: # Local import check for mlflow-skinny not including huggingface_hub pass - - -def get_free_disk_space_in_GiB(): - return 
shutil.disk_usage("/").free / (1024**3) From 532850932715cb83e2e88132e7d97785ca048f45 Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Wed, 18 Oct 2023 09:37:25 -0700 Subject: [PATCH 018/101] Adding support for azure openai (#9982) Signed-off-by: Sunish Sheth --- mlflow/metrics/genai/model_utils.py | 12 ++++- tests/metrics/genai/test_model_utils.py | 63 +++++++++++++++++++++++++ 2 files changed, 74 insertions(+), 1 deletion(-) diff --git a/mlflow/metrics/genai/model_utils.py b/mlflow/metrics/genai/model_utils.py index 5ee4edc845c57..3933727f0e51a 100644 --- a/mlflow/metrics/genai/model_utils.py +++ b/mlflow/metrics/genai/model_utils.py @@ -54,13 +54,23 @@ def _call_openai_api(openai_uri, payload, timeout): error_code=INVALID_PARAMETER_VALUE, ) + config = {"openai_api_key": os.environ["OPENAI_API_KEY"]} + if "OPENAI_API_BASE" in os.environ: + config["openai_api_base"] = os.environ["OPENAI_API_BASE"] + if "OPENAI_API_TYPE" in os.environ: + config["openai_api_type"] = os.environ["OPENAI_API_TYPE"] + if "OPENAI_API_VERSION" in os.environ: + config["openai_api_version"] = os.environ["OPENAI_API_VERSION"] + if "OPENAI_DEPLOYMENT_NAME" in os.environ: + config["openai_deployment_name"] = os.environ["OPENAI_DEPLOYMENT_NAME"] + route_config = RouteConfig( name="openai", route_type=ROUTE_TYPE, model={ "name": openai_uri, "provider": "openai", - "config": {"openai_api_key": os.environ["OPENAI_API_KEY"]}, + "config": config, }, ) openai_provider = OpenAIProvider(route_config) diff --git a/tests/metrics/genai/test_model_utils.py b/tests/metrics/genai/test_model_utils.py index c79fb06f1dad8..049be652ac0e4 100644 --- a/tests/metrics/genai/test_model_utils.py +++ b/tests/metrics/genai/test_model_utils.py @@ -19,6 +19,19 @@ def set_envs(monkeypatch): ) +@pytest.fixture +def set_azure_envs(monkeypatch): + monkeypatch.setenvs( + { + "OPENAI_API_KEY": "test", + "OPENAI_API_TYPE": "azure", + "OPENAI_API_VERSION": "2023-05-15", + "OPENAI_API_BASE": "https://openai-for.openai.azure.com/", + "OPENAI_DEPLOYMENT_NAME": "test-openai", + } + ) + + def test_parse_model_uri(): prefix, suffix = _parse_model_uri("openai:/gpt-3.5-turbo") @@ -101,6 +114,56 @@ def json(self): ) +def test_score_model_azure_openai(set_azure_envs): + class MockResponse(Response): + def __init__(self, json_data, status_code): + super().__init__() + self.json_data = json_data + self.status_code = status_code + self.headers = {"Content-Type": "application/json"} + + def json(self): + return self.json_data + + resp = { + "id": "chatcmpl-abc123", + "object": "chat.completion", + "created": 1677858242, + "model": "gpt-3.5-turbo-0301", + "usage": { + "prompt_tokens": 13, + "completion_tokens": 7, + "total_tokens": 20, + }, + "choices": [ + { + "message": { + "role": "assistant", + "content": "\n\nThis is a test!", + }, + "finish_reason": "stop", + "index": 0, + } + ], + "headers": {"Content-Type": "application/json"}, + } + + with mock.patch("requests.post", return_value=MockResponse(resp, 200)) as mock_post: + score_model_on_payload( + "openai:/gpt-3.5-turbo", {"prompt": "my prompt", "temperature": 0.1}, 10 + ) + mock_post.assert_called_once_with( + url="https://openai-for.openai.azure.com/openai/deployments/test-openai/chat/" + "completions?api-version=2023-05-15", + headers={"api-key": "test"}, + json={ + "temperature": 0.2, + "messages": [{"role": "user", "content": "my prompt"}], + }, + timeout=10, + ) + + def test_score_model_gateway(): expected_output = { "candidates": [ From 8556b85f6be3d8481d3286fbe4693892cf7b9857 Mon Sep 17 00:00:00 2001 
From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Wed, 18 Oct 2023 10:43:14 -0700 Subject: [PATCH 019/101] Docstrings for metrics (#9995) Signed-off-by: Prithvi Kannan --- mlflow/metrics/__init__.py | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/mlflow/metrics/__init__.py b/mlflow/metrics/__init__.py index 1cfdd80336d03..ae00b0a9603ca 100644 --- a/mlflow/metrics/__init__.py +++ b/mlflow/metrics/__init__.py @@ -41,7 +41,7 @@ greater_is_better=False, name="latency", ) -""" +latency.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating latency. Latency is determined by the time it takes to generate a @@ -55,7 +55,7 @@ greater_is_better=True, name="token_count", ) -""" +token_count.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating token_count. Token count is calculated using tiktoken by using the @@ -69,7 +69,7 @@ long_name="toxicity/roberta-hate-speech-dynabench-r4", version="v1", ) -""" +toxicity.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `toxicity`_ using the model `roberta-hate-speech-dynabench-r4`_, @@ -113,7 +113,7 @@ name="flesch_kincaid_grade_level", version="v1", ) -""" +flesch_kincaid_grade_level.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating `flesch kincaid grade level`_ using `textstat`_. @@ -136,7 +136,7 @@ long_name="automated_readability_index_grade_level", version="v1", ) -""" +ari_grade_level.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating `automated readability index`_ using `textstat`_. @@ -156,7 +156,7 @@ exact_match = make_metric( eval_fn=_accuracy_eval_fn, greater_is_better=True, name="exact_match", version="v1" ) -""" +exact_match.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for calculating `accuracy`_ using sklearn. @@ -174,7 +174,7 @@ name="rouge1", version="v1", ) -""" +rouge1.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rouge1`_. @@ -194,7 +194,7 @@ name="rouge2", version="v1", ) -""" +rouge2.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rouge2`_. @@ -214,7 +214,7 @@ name="rougeL", version="v1", ) -""" +rougeL.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rougeL`_. @@ -234,7 +234,7 @@ name="rougeLsum", version="v1", ) -""" +rougeLsum.__doc__ = """ .. Note:: Experimental: This metric may change or be removed in a future release without warning. A metric for evaluating `rougeLsum`_. @@ -255,7 +255,7 @@ greater_is_better=False, name="mean_absolute_error", ) -""" +mae.__doc__ = """ A metric for evaluating `mae`_. This metric computes an aggregate score for the mean absolute error for regression. @@ -268,7 +268,7 @@ greater_is_better=False, name="mean_squared_error", ) -""" +mse.__doc__ = """ A metric for evaluating `mse`_. This metric computes an aggregate score for the mean squared error for regression. 
@@ -281,7 +281,7 @@ greater_is_better=False, name="root_mean_squared_error", ) -""" +rmse.__doc__ = """ A metric for evaluating the square root of `mse`_. This metric computes an aggregate score for the root mean absolute error for regression. @@ -294,7 +294,7 @@ greater_is_better=True, name="r2_score", ) -""" +r2_score.__doc__ = """ A metric for evaluating `r2_score`_. This metric computes an aggregate score for the coefficient of determination. R2 ranges from @@ -309,7 +309,7 @@ greater_is_better=False, name="max_error", ) -""" +max_error.__doc__ = """ A metric for evaluating `max_error`_. This metric computes an aggregate score for the maximum residual error for regression. @@ -322,7 +322,7 @@ greater_is_better=False, name="mean_absolute_percentage_error", ) -""" +mape.__doc__ = """ A metric for evaluating `mape`_. This metric computes an aggregate score for the mean absolute percentage error for regression. @@ -333,7 +333,7 @@ # Binary Classification Metrics recall_score = make_metric(eval_fn=_recall_eval_fn, greater_is_better=True, name="recall_score") -""" +recall_score.__doc__ = """ A metric for evaluating `recall`_ for classification. This metric computes an aggregate score between 0 and 1 for the recall of a classification task. @@ -344,7 +344,7 @@ precision_score = make_metric( eval_fn=_precision_eval_fn, greater_is_better=True, name="precision_score" ) -""" +precision_score.__doc__ = """ A metric for evaluating `precision`_ for classification. This metric computes an aggregate score between 0 and 1 for the precision of @@ -354,7 +354,7 @@ """ f1_score = make_metric(eval_fn=_f1_score_eval_fn, greater_is_better=True, name="f1_score") -""" +f1_score.__doc__ = """ A metric for evaluating `f1_score`_ for binary classification. This metric computes an aggregate score between 0 and 1 for the F1 score (F-measure) of a From cf6b0e4b86597b33684d42ab6b7a4d6e139ba64c Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Wed, 18 Oct 2023 11:11:52 -0700 Subject: [PATCH 020/101] Include other predicted columns in eval table (#9998) Signed-off-by: Prithvi Kannan --- mlflow/models/evaluation/default_evaluator.py | 4 ++++ tests/evaluate/test_default_evaluator.py | 1 + 2 files changed, 5 insertions(+) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 2e2b5b37626fe..e1484bfbf8235 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1452,6 +1452,10 @@ def _log_eval_table(self): else: data = self.dataset.features_data.assign(outputs=self.y_pred) + # include other_output_columns in the eval table + if self.other_output_columns is not None: + data = data.assign(**self.other_output_columns) + columns = {} for metric_name, metric_value in self.metrics_values.items(): scores = metric_value.scores diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index ce6ffa30883a9..bf0a11a44dcfe 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2772,6 +2772,7 @@ def test_eval_df(predictions, targets, metrics, inputs, truth, context): "truth", "targets", "outputs", + "context", "token_count", "toxicity/v1/score", "perplexity/v1/score", From df8069cfdefc43438e4aca048496625cf7cbd398 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Wed, 18 Oct 2023 14:59:11 -0700 Subject: [PATCH 021/101] Rename correctness to answer similarity (#9944) Signed-off-by: Ann 
Zhang --- docs/source/python_api/mlflow.metrics.rst | 10 ++--- .../evaluation/evaluate_with_llm_judge.py | 6 +-- mlflow/metrics/__init__.py | 4 +- mlflow/metrics/genai/metric_definitions.py | 36 ++++++++-------- mlflow/metrics/genai/prompts/v1.py | 43 ++++++++++--------- tests/metrics/genai/test_genai_metrics.py | 25 +++++------ 6 files changed, 63 insertions(+), 61 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index cc086cfe8105f..35dd22cb778b3 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -12,7 +12,7 @@ The following code demonstrates how to use :py:func:`mlflow.evaluate()` with an .. code-block:: python import mlflow - from mlflow.metrics import EvaluationExample, correctness + from mlflow.metrics import EvaluationExample, answer_similarity eval_df = pd.DataFrame( { @@ -41,13 +41,13 @@ The following code demonstrates how to use :py:func:`mlflow.evaluate()` with an "engineers face when developing, training, and deploying machine learning models." }, ) - correctness_metric = correctness(examples=[example]) + answer_similarity_metric = answer_similarity(examples=[example]) results = mlflow.evaluate( logged_model.model_uri, eval_df, targets="ground_truth", model_type="question-answering", - extra_metrics=[correctness_metric], + extra_metrics=[answer_similarity_metric], ) Evaluation results are stored as :py:class:`MetricValue `. Aggregate results are logged to the MLflow run as metrics, while per-example results are logged to the MLflow run as artifacts in the form of an evaluation table. @@ -119,7 +119,7 @@ Users create their own :py:class:`EvaluationMetric ` for evaluating text models. These metrics use an LLM to evaluate the quality of a model's output text. Note that your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. The following factory functions help you customize the intelligent metric to your use case. -.. autofunction:: mlflow.metrics.correctness +.. autofunction:: mlflow.metrics.answer_similarity .. autofunction:: mlflow.metrics.strict_correctness @@ -137,4 +137,4 @@ When using LLM based :py:class:`EvaluationMetric EvaluationMetric: """ - This function will create a genai metric used to evaluate the correctness of an LLM using the - model provided. Correctness will be assessed by the similarity in meaning and description to - the ``ground_truth``. + This function will create a genai metric used to evaluate the answer similarity of an LLM + using the model provided. Answer similarity will be assessed by the semantic similarity of the + output to the ``ground_truth``, which should be specified in the ``target`` column. - The ``ground_truth`` eval_arg must be provided as part of the input dataset or output + The ``target`` eval_arg must be provided as part of the input dataset or output predictions. This can be mapped to a column of a different name using the a ``col_mapping`` in the ``evaluator_config``. @@ -31,10 +31,10 @@ def correctness( :param model: (Optional) The model that will be used to evaluate this metric. Defaults to gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. - :param metric_version: (Optional) The version of the correctness metric to use. + :param metric_version: (Optional) The version of the answer similarity metric to use. Defaults to the latest version. 
:param examples: (Optional) Provide a list of examples to help the judge model evaluate the - correctness. It is highly recommended to add examples to be used as a reference to + answer similarity. It is highly recommended to add examples to be used as a reference to evaluate the new results. :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. Defaults to 60 seconds. @@ -42,35 +42,35 @@ def correctness( """ if metric_version is None: metric_version = _get_latest_metric_version() - class_name = f"mlflow.metrics.genai.prompts.{metric_version}.CorrectnessMetric" + class_name = f"mlflow.metrics.genai.prompts.{metric_version}.AnswerSimilarityMetric" try: - correctness_class_module = _get_class_from_string(class_name) + answer_similarity_class_module = _get_class_from_string(class_name) except ModuleNotFoundError: raise MlflowException( - f"Failed to find correctness metric for version {metric_version}." + f"Failed to find answer similarity metric for version {metric_version}." f" Please check the version", error_code=INVALID_PARAMETER_VALUE, ) from None except Exception as e: raise MlflowException( - f"Failed to construct correctness metric {metric_version}. Error: {e!r}", + f"Failed to construct answer similarity metric {metric_version}. Error: {e!r}", error_code=INTERNAL_ERROR, ) from None if examples is None: - examples = correctness_class_module.default_examples + examples = answer_similarity_class_module.default_examples if model is None: - model = correctness_class_module.default_model + model = answer_similarity_class_module.default_model return make_genai_metric( - name="correctness", - definition=correctness_class_module.definition, - grading_prompt=correctness_class_module.grading_prompt, + name="answer_similarity", + definition=answer_similarity_class_module.definition, + grading_prompt=answer_similarity_class_module.grading_prompt, examples=examples, version=metric_version, model=model, - grading_context_columns=correctness_class_module.grading_context_columns, - parameters=correctness_class_module.parameters, + grading_context_columns=answer_similarity_class_module.grading_context_columns, + parameters=answer_similarity_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, judge_request_timeout=judge_request_timeout, diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 487102034980d..64349b2d71b97 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -88,23 +88,23 @@ def _format_examples(self): @dataclass -class CorrectnessMetric: +class AnswerSimilarityMetric: definition = ( - "Correctness is evaluated on the proximity of the provided output to the ground truth " - "in terms of meaning and description similarity. Scores can be assigned from 1 to 5 based " - "on the gradual similarity in meaning and description to the ground truth." + "Answer similarity is evaluated on the degree of semantic similarity of the provided " + "output to the provided targets, which is the ground truth. Scores can be assigned based " + "on the gradual similarity in meaning and description to the provided targets, where a " + "higher score indicates greater alignment between the provided output and provided targets." ) grading_prompt = ( - "Correctness: Below are the details for different scores:" - "- Score 1: the output is completely incorrect, doesn't mention anything related to the " - "input or is completely contrary to the provided ground truth." 
- "- Score 2: the output provides some relevance to the input and answers one aspect of the " - "question as in the ground truth." - "- Score 3: the output mostly answers the question but is missing or hallucinating on " - "one critical aspect." - "- Score 5: the output correctly answers the question and is not missing any major aspect " - "provided in the ground truth answer." + "Answer similarity: Below are the details for different scores:\n" + "- Score 1: the output has little to no semantic similarity to the provided targets.\n" + "- Score 2: the output displays partial semantic similarity to the provided targets on " + "some aspects.\n" + "- Score 3: the output has moderate semantic similarity to the provided targets.\n" + "- Score 4: the output aligns with the provided targets in most aspects and has " + "substantial semantic similarity.\n" + "- Score 5: the output closely aligns with the provided targets in all significant aspects." ) grading_context_columns = ["targets"] @@ -115,12 +115,11 @@ class CorrectnessMetric: input="What is MLflow?", output="MLflow is an open-source platform.", score=2, - justification="While the statement correctly identifies MLflow as an open-source platform, " - "it lacks some critical aspects mentioned in the ground truth. Specifically, it doesn't " - "provide information about MLflow's purpose in managing the end-to-end machine learning " - "lifecycle, its development by Databricks, and its focus on addressing challenges faced by " - "data scientists and machine learning engineers. Therefore, it answers one aspect of the " - "question but is missing several critical aspects provided in the ground truth.", + justification="The provided output is partially similar to the target, as it captures the " + "general idea that MLflow is an open-source platform. However, it lacks the comprehensive " + "details and context provided in the target about MLflow's purpose, development, and " + "challenges it addresses. Therefore, it demonstrates partial, but not complete, " + "semantic similarity.", grading_context={ "targets": "MLflow is an open-source platform for managing the end-to-end " "machine learning (ML) lifecycle. It was developed by Databricks, a company " @@ -137,8 +136,10 @@ class CorrectnessMetric: "including experiment tracking, model packaging, versioning, and deployment, simplifying " "the ML lifecycle.", score=4, - justification="The output effectively explains what MLflow is and its purpose. " - "Information about the developer of MLflow could be included for a 5-score.", + justification="The provided output aligns closely with the target. It covers various key " + "aspects mentioned in the target, including managing machine learning workflows, " + "experiment tracking, model packaging, versioning, and deployment. While it may not include" + " every single detail from the target, it demonstrates substantial semantic similarity.", grading_context={ "targets": "MLflow is an open-source platform for managing the end-to-end " "machine learning (ML) lifecycle. 
It was developed by Databricks, a company " diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index fd472d9e473d0..5d94fa3398e8d 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -15,12 +15,12 @@ make_genai_metric, ) from mlflow.metrics.genai.metric_definitions import ( - correctness, + answer_similarity, relevance, strict_correctness, ) from mlflow.metrics.genai.prompts.v1 import ( - CorrectnessMetric, + AnswerSimilarityMetric, RelevanceMetric, StrictCorrectnessMetric, ) @@ -512,7 +512,7 @@ def test_extract_score_and_justification(): def test_correctness_metric(): - correctness_metric = correctness( + correctness_metric = answer_similarity( model="gateway:/gpt-3.5-turbo", metric_version="v1", examples=[mlflow_example] ) @@ -534,8 +534,8 @@ def test_correctness_metric(): "sent to a machine\nlearning model, and you will be given an output that the model " "produced. You\nmay also be given additional information that was used by the model " "to generate the output.\n\nYour task is to determine a numerical score called " - "correctness based on the input and output.\nA definition of " - "correctness and a grading rubric are provided below.\nYou must use the " + "answer_similarity based on the input and output.\nA definition of " + "answer_similarity and a grading rubric are provided below.\nYou must use the " "grading rubric to determine your score. You must also justify your score." "\n\nExamples could be included below for reference. Make sure to use them as " "references and to\nunderstand them before completing the task.\n" @@ -543,8 +543,8 @@ def test_correctness_metric(): f"\nOutput:\n{mlflow_prediction}\n" "\nAdditional information used by the model:\nkey: targets\nvalue:\n" f"{mlflow_ground_truth}\n" - f"\nMetric definition:\n{CorrectnessMetric.definition}\n" - f"\nGrading rubric:\n{CorrectnessMetric.grading_prompt}\n" + f"\nMetric definition:\n{AnswerSimilarityMetric.definition}\n" + f"\nGrading rubric:\n{AnswerSimilarityMetric.grading_prompt}\n" "\nExamples:\n" f"\nInput:\n{mlflow_example.input}\n" f"\nOutput:\n{mlflow_example.output}\n" @@ -553,10 +553,10 @@ def test_correctness_metric(): f"\nscore: {mlflow_example.score}\n" f"justification: {mlflow_example.justification}\n \n" "\nYou must return the following fields in your response one below the other:\nscore: " - "Your numerical score for the model's correctness based on the " + "Your numerical score for the model's answer_similarity based on the " "rubric\njustification: Your step-by-step reasoning about the model's " - "correctness score\n ", - **CorrectnessMetric.parameters, + "answer_similarity score\n ", + **AnswerSimilarityMetric.parameters, } assert metric_value.scores == [3] @@ -569,9 +569,10 @@ def test_correctness_metric(): } with pytest.raises( - MlflowException, match="Failed to find correctness metric for version non-existent-version" + MlflowException, + match="Failed to find answer similarity metric for version non-existent-version", ): - correctness_metric = correctness( + answer_similarity( model="gateway:/gpt-3.5-turbo", metric_version="non-existent-version", examples=[mlflow_example], From 378de570c0cf503c0f2c01ff39bc664d45b325e9 Mon Sep 17 00:00:00 2001 From: TomuHirata Date: Thu, 19 Oct 2023 08:54:54 +0900 Subject: [PATCH 022/101] Docs: Update linting command (#9997) Signed-off-by: TomeHirata --- .github/workflows/recipe-template.yml | 10 ++++++++-- CONTRIBUTING.md | 2 +- 2 files changed, 9 insertions(+), 
3 deletions(-) diff --git a/.github/workflows/recipe-template.yml b/.github/workflows/recipe-template.yml index 6bb23e4624cfc..5f16b6a456e1e 100644 --- a/.github/workflows/recipe-template.yml +++ b/.github/workflows/recipe-template.yml @@ -55,9 +55,15 @@ jobs: - name: Install dependencies run: | pip install -r ${{ github.event.inputs.repository }}/requirements/lint-requirements.txt - - name: Run lint checks + - name: Install pre-commit hooks run: | - cd ${{ github.event.inputs.repository }} && ../../dev/lint.sh + cd ${{ github.event.inputs.repository }} + pre-commit install -t pre-commit -t prepare-commit-msg + - name: Run pre-commit + id: pre-commit + run: | + cd ${{ github.event.inputs.repository }} + pre-commit run --all-files recipe: runs-on: ubuntu-latest timeout-minutes: 120 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 95ebe516c1669..083ba6b28f826 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -580,7 +580,7 @@ Then, verify that the unit tests & linter pass before submitting a pull request by running: ```bash -./dev/lint.sh +pre-commit run --all-files ./dev/run-python-tests.sh ``` From da289438414979e77c07938d90b639b1d3303632 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Wed, 18 Oct 2023 18:26:05 -0700 Subject: [PATCH 023/101] Make all metrics in `mlflow.metrics` functions (#9999) Signed-off-by: Prithvi Kannan --- docs/source/python_api/mlflow.metrics.rst | 59 +- mlflow/metrics/__init__.py | 582 +++++++++--------- mlflow/models/evaluation/default_evaluator.py | 20 +- tests/evaluate/test_default_evaluator.py | 14 +- tests/metrics/test_metric_definitions.py | 58 +- 5 files changed, 376 insertions(+), 357 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 35dd22cb778b3..9f8a9acbdf414 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -54,64 +54,45 @@ Evaluation results are stored as :py:class:`MetricValue ` for evaluating models. These metrics are computed automatically depending on the ``model_type``. For more information on the ``model_type`` parameter, see :py:func:`mlflow.evaluate()` API. +We provide the following builtin factory functions to create :py:class:`EvaluationMetric ` for evaluating models. These metrics are computed automatically depending on the ``model_type``. For more information on the ``model_type`` parameter, see :py:func:`mlflow.evaluate()` API. -.. autodata:: mlflow.metrics.mae - :annotation: +.. autofunction:: mlflow.metrics.mae -.. autodata:: mlflow.metrics.mape - :annotation: +.. autofunction:: mlflow.metrics.mape -.. autodata:: mlflow.metrics.max_error - :annotation: +.. autofunction:: mlflow.metrics.max_error -.. autodata:: mlflow.metrics.mse - :annotation: +.. autofunction:: mlflow.metrics.mse -.. autodata:: mlflow.metrics.rmse - :annotation: +.. autofunction:: mlflow.metrics.rmse -.. autodata:: mlflow.metrics.r2_score - :annotation: +.. autofunction:: mlflow.metrics.r2_score -.. autodata:: mlflow.metrics.precision_score - :annotation: +.. autofunction:: mlflow.metrics.precision_score -.. autodata:: mlflow.metrics.recall_score - :annotation: +.. autofunction:: mlflow.metrics.recall_score -.. autodata:: mlflow.metrics.f1_score - :annotation: +.. autofunction:: mlflow.metrics.f1_score -.. autodata:: mlflow.metrics.ari_grade_level - :annotation: +.. autofunction:: mlflow.metrics.ari_grade_level -.. autodata:: mlflow.metrics.flesch_kincaid_grade_level - :annotation: +.. 
autofunction:: mlflow.metrics.flesch_kincaid_grade_level -.. autodata:: mlflow.metrics.perplexity - :annotation: +.. autofunction:: mlflow.metrics.perplexity -.. autodata:: mlflow.metrics.rouge1 - :annotation: +.. autofunction:: mlflow.metrics.rouge1 -.. autodata:: mlflow.metrics.rouge2 - :annotation: +.. autofunction:: mlflow.metrics.rouge2 -.. autodata:: mlflow.metrics.rougeL - :annotation: +.. autofunction:: mlflow.metrics.rougeL -.. autodata:: mlflow.metrics.rougeLsum - :annotation: +.. autofunction:: mlflow.metrics.rougeLsum -.. autodata:: mlflow.metrics.toxicity - :annotation: +.. autofunction:: mlflow.metrics.toxicity -.. autodata:: mlflow.metrics.token_count - :annotation: +.. autofunction:: mlflow.metrics.token_count -.. autodata:: mlflow.metrics.latency - :annotation: +.. autofunction:: mlflow.metrics.latency Users create their own :py:class:`EvaluationMetric ` using the :py:func:`make_metric ` factory function diff --git a/mlflow/metrics/__init__.py b/mlflow/metrics/__init__.py index 836f83f7a3be6..f7811be7b57f8 100644 --- a/mlflow/metrics/__init__.py +++ b/mlflow/metrics/__init__.py @@ -35,333 +35,365 @@ EvaluationMetric, make_metric, ) +from mlflow.utils.annotations import experimental -latency = make_metric( - eval_fn=lambda x: MetricValue(), - greater_is_better=False, - name="latency", -) -latency.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for calculating latency. Latency is determined by the time it takes to generate a -prediction for a given input. Note that computing latency requires each row to be predicted -sequentially, which will likely slow down the evaluation process. -""" - -# general text metrics -token_count = make_metric( - eval_fn=_token_count_eval_fn, - greater_is_better=True, - name="token_count", -) -token_count.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for calculating token_count. Token count is calculated using tiktoken by using the -`cl100k_base` tokenizer. -""" - -toxicity = make_metric( - eval_fn=_toxicity_eval_fn, - greater_is_better=False, - name="toxicity", - long_name="toxicity/roberta-hate-speech-dynabench-r4", - version="v1", -) -toxicity.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for evaluating `toxicity`_ using the model `roberta-hate-speech-dynabench-r4`_, -which defines hate as "abusive speech targeting specific group characteristics, such as -ethnic origin, religion, gender, or sexual orientation." - -The score ranges from 0 to 1, where scores closer to 1 are more toxic. The default threshold -for a text to be considered "toxic" is 0.5. - -Aggregations calculated for this metric: - - ratio (of toxic input texts) - -.. _toxicity: https://huggingface.co/spaces/evaluate-measurement/toxicity -.. _roberta-hate-speech-dynabench-r4: https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target -""" - -perplexity = make_metric( - eval_fn=_perplexity_eval_fn, - greater_is_better=False, - name="perplexity", - long_name="perplexity/gpt2", - version="v1", -) -""" -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for evaluating `perplexity`_ using the model gpt2. 
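The call-site impact of this refactor is small but worth spelling out: every builtin metric is now a factory function rather than a module-level constant, so callers add parentheses and get back a freshly constructed EvaluationMetric. A minimal sketch (attribute names follow what make_metric sets in this module):

    import mlflow

    # Before: extra_metrics=[mlflow.metrics.toxicity, mlflow.metrics.latency]
    # After:  call the factories to construct the metric objects.
    extra_metrics = [mlflow.metrics.toxicity(), mlflow.metrics.latency()]

    for metric in extra_metrics:
        # Construction is cheap; heavy dependencies load only when eval_fn runs.
        print(metric.name, metric.greater_is_better)

One practical benefit of the function form is that the @experimental decorator and per-metric docstrings attach cleanly, which appears to be what the switch from autodata to autofunction in the docs above reflects.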
-The score ranges from 0 to infinity, where a lower score means that the model is better at -predicting the given text and a higher score means that the model is not likely to predict the text. +@experimental +def latency() -> EvaluationMetric: + """ + This function will create a metric for calculating latency. Latency is determined by the time + it takes to generate a prediction for a given input. Note that computing latency requires + each row to be predicted sequentially, which will likely slow down the evaluation process. + """ + return make_metric( + eval_fn=lambda x: MetricValue(), + greater_is_better=False, + name="latency", + ) -Aggregations calculated for this metric: - - mean -.. _perplexity: https://huggingface.co/spaces/evaluate-metric/perplexity -""" - -flesch_kincaid_grade_level = make_metric( - eval_fn=_flesch_kincaid_eval_fn, - greater_is_better=False, - name="flesch_kincaid_grade_level", - version="v1", -) -flesch_kincaid_grade_level.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for calculating `flesch kincaid grade level`_ using `textstat`_. - -This metric outputs a number that approximates the grade level needed to comprehend the text, which -will likely range from around 0 to 15 (although it is not limited to this range). - -Aggregations calculated for this metric: - - mean - -.. _flesch kincaid grade level: - https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch%E2%80%93Kincaid_grade_level -.. _textstat: https://pypi.org/project/textstat/ -""" - -ari_grade_level = make_metric( - eval_fn=_ari_eval_fn, - greater_is_better=False, - name="ari_grade_level", - long_name="automated_readability_index_grade_level", - version="v1", -) -ari_grade_level.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for calculating `automated readability index`_ using `textstat`_. - -This metric outputs a number that approximates the grade level needed to comprehend the text, which -will likely range from around 0 to 15 (although it is not limited to this range). - -Aggregations calculated for this metric: - - mean +# general text metrics +@experimental +def token_count() -> EvaluationMetric: + """ + This function will create a metric for calculating token_count. Token count is calculated + using tiktoken by using the `cl100k_base` tokenizer. + """ + return make_metric( + eval_fn=_token_count_eval_fn, + greater_is_better=True, + name="token_count", + ) + + +@experimental +def toxicity() -> EvaluationMetric: + """ + This function will create a metric for evaluating `toxicity`_ using the model + `roberta-hate-speech-dynabench-r4`_, which defines hate as "abusive speech targeting + specific group characteristics, such as ethnic origin, religion, gender, or sexual + orientation." + + The score ranges from 0 to 1, where scores closer to 1 are more toxic. The default threshold + for a text to be considered "toxic" is 0.5. + + Aggregations calculated for this metric: + - ratio (of toxic input texts) + + .. _toxicity: https://huggingface.co/spaces/evaluate-measurement/toxicity + .. 
_roberta-hate-speech-dynabench-r4: https://huggingface.co/facebook/roberta-hate-speech-dynabench-r4-target + """ + return make_metric( + eval_fn=_toxicity_eval_fn, + greater_is_better=False, + name="toxicity", + long_name="toxicity/roberta-hate-speech-dynabench-r4", + version="v1", + ) + + +@experimental +def perplexity() -> EvaluationMetric: + """ + This function will create a metric for evaluating `perplexity`_ using the model gpt2. + + The score ranges from 0 to infinity, where a lower score means that the model is better at + predicting the given text and a higher score means that the model is not likely to predict the + text. + + Aggregations calculated for this metric: + - mean + + .. _perplexity: https://huggingface.co/spaces/evaluate-metric/perplexity + """ + return make_metric( + eval_fn=_perplexity_eval_fn, + greater_is_better=False, + name="perplexity", + long_name="perplexity/gpt2", + version="v1", + ) + + +@experimental +def flesch_kincaid_grade_level() -> EvaluationMetric: + """ + This function will create a metric for calculating `flesch kincaid grade level`_ using + `textstat`_. + + This metric outputs a number that approximates the grade level needed to comprehend the text, + which will likely range from around 0 to 15 (although it is not limited to this range). + + Aggregations calculated for this metric: + - mean + + .. _flesch kincaid grade level: + https://en.wikipedia.org/wiki/Flesch%E2%80%93Kincaid_readability_tests#Flesch%E2%80%93Kincaid_grade_level + .. _textstat: https://pypi.org/project/textstat/ + """ + return make_metric( + eval_fn=_flesch_kincaid_eval_fn, + greater_is_better=False, + name="flesch_kincaid_grade_level", + version="v1", + ) + + +@experimental +def ari_grade_level() -> EvaluationMetric: + """ + This function will create a metric for calculating `automated readability index`_ using + `textstat`_. + + This metric outputs a number that approximates the grade level needed to comprehend the text, + which will likely range from around 0 to 15 (although it is not limited to this range). + + Aggregations calculated for this metric: + - mean + + .. _automated readability index: https://en.wikipedia.org/wiki/Automated_readability_index + .. _textstat: https://pypi.org/project/textstat/ + """ + return make_metric( + eval_fn=_ari_eval_fn, + greater_is_better=False, + name="ari_grade_level", + long_name="automated_readability_index_grade_level", + version="v1", + ) -.. _automated readability index: https://en.wikipedia.org/wiki/Automated_readability_index -.. _textstat: https://pypi.org/project/textstat/ -""" # question answering metrics +@experimental +def exact_match() -> EvaluationMetric: + """ + This function will create a metric for calculating `accuracy`_ using sklearn. -exact_match = make_metric( - eval_fn=_accuracy_eval_fn, greater_is_better=True, name="exact_match", version="v1" -) -exact_match.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. + This metric only computes an aggregate score which ranges from 0 to 1. -A metric for calculating `accuracy`_ using sklearn. + .. _accuracy: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html + """ + return make_metric( + eval_fn=_accuracy_eval_fn, greater_is_better=True, name="exact_match", version="v1" + ) -This metric only computes an aggregate score which ranges from 0 to 1. - -.. 
_accuracy: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html -""" # text summarization metrics +@experimental +def rouge1() -> EvaluationMetric: + """ + This function will create a metric for evaluating `rouge1`_. + + The score ranges from 0 to 1, where a higher score indicates higher similarity. + `rouge1`_ uses unigram based scoring to calculate similarity. + + Aggregations calculated for this metric: + - mean + + .. _rouge1: https://huggingface.co/spaces/evaluate-metric/rouge + """ + return make_metric( + eval_fn=_rouge1_eval_fn, + greater_is_better=True, + name="rouge1", + version="v1", + ) + + +@experimental +def rouge2() -> EvaluationMetric: + """ + This function will create a metric for evaluating `rouge2`_. + + The score ranges from 0 to 1, where a higher score indicates higher similarity. + `rouge2`_ uses bigram based scoring to calculate similarity. + + Aggregations calculated for this metric: + - mean + + .. _rouge2: https://huggingface.co/spaces/evaluate-metric/rouge + """ + return make_metric( + eval_fn=_rouge2_eval_fn, + greater_is_better=True, + name="rouge2", + version="v1", + ) + + +@experimental +def rougeL() -> EvaluationMetric: + """ + This function will create a metric for evaluating `rougeL`_. + + The score ranges from 0 to 1, where a higher score indicates higher similarity. + `rougeL`_ uses unigram based scoring to calculate similarity. + + Aggregations calculated for this metric: + - mean + + .. _rougeL: https://huggingface.co/spaces/evaluate-metric/rouge + """ + return make_metric( + eval_fn=_rougeL_eval_fn, + greater_is_better=True, + name="rougeL", + version="v1", + ) + + +@experimental +def rougeLsum() -> EvaluationMetric: + """ + This function will create a metric for evaluating `rougeLsum`_. + + The score ranges from 0 to 1, where a higher score indicates higher similarity. + `rougeLsum`_ uses longest common subsequence based scoring to calculate similarity. + + Aggregations calculated for this metric: + - mean + + .. _rougeLsum: https://huggingface.co/spaces/evaluate-metric/rouge + """ + return make_metric( + eval_fn=_rougeLsum_eval_fn, + greater_is_better=True, + name="rougeLsum", + version="v1", + ) -rouge1 = make_metric( - eval_fn=_rouge1_eval_fn, - greater_is_better=True, - name="rouge1", - version="v1", -) -rouge1.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for evaluating `rouge1`_. - -The score ranges from 0 to 1, where a higher score indicates higher similarity. -`rouge1`_ uses unigram based scoring to calculate similarity. -Aggregations calculated for this metric: - - mean - -.. _rouge1: https://huggingface.co/spaces/evaluate-metric/rouge -""" - -rouge2 = make_metric( - eval_fn=_rouge2_eval_fn, - greater_is_better=True, - name="rouge2", - version="v1", -) -rouge2.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. - -A metric for evaluating `rouge2`_. - -The score ranges from 0 to 1, where a higher score indicates higher similarity. -`rouge2`_ uses bigram based scoring to calculate similarity. - -Aggregations calculated for this metric: - - mean - -.. _rouge2: https://huggingface.co/spaces/evaluate-metric/rouge -""" - -rougeL = make_metric( - eval_fn=_rougeL_eval_fn, - greater_is_better=True, - name="rougeL", - version="v1", -) -rougeL.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. 
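The updated tests further down exercise these factories by calling eval_fn directly, which is also a convenient way to sanity-check a metric outside of mlflow.evaluate(). A small sketch using the rouge1 values from those tests (requires the optional `evaluate` package and its rouge dependency):

    import pandas as pd
    from mlflow.metrics import rouge1

    predictions = pd.Series(["a", "d c"])
    targets = pd.Series(["d", "b c"])

    # eval_fn takes (predictions, targets, metrics) and returns a MetricValue.
    value = rouge1().eval_fn(predictions, targets, {})
    print(value.scores)             # [0.0, 0.5] per the updated tests
    print(value.aggregate_results)  # includes "mean" (0.25), "p90", "variance"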
+# General Regression Metrics +def mae() -> EvaluationMetric: + """ + This function will create a metric for evaluating `mae`_. -A metric for evaluating `rougeL`_. + This metric computes an aggregate score for the mean absolute error for regression. -The score ranges from 0 to 1, where a higher score indicates higher similarity. -`rougeL`_ uses unigram based scoring to calculate similarity. + .. _mae: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html + """ + return make_metric( + eval_fn=_mae_eval_fn, + greater_is_better=False, + name="mean_absolute_error", + ) -Aggregations calculated for this metric: - - mean -.. _rougeL: https://huggingface.co/spaces/evaluate-metric/rouge -""" +def mse() -> EvaluationMetric: + """ + This function will create a metric for evaluating `mse`_. -rougeLsum = make_metric( - eval_fn=_rougeLsum_eval_fn, - greater_is_better=True, - name="rougeLsum", - version="v1", -) -rougeLsum.__doc__ = """ -.. Note:: Experimental: This metric may change or be removed in a future release without warning. + This metric computes an aggregate score for the mean squared error for regression. -A metric for evaluating `rougeLsum`_. + .. _mse: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html + """ + return make_metric( + eval_fn=_mse_eval_fn, + greater_is_better=False, + name="mean_squared_error", + ) -The score ranges from 0 to 1, where a higher score indicates higher similarity. -`rougeLsum`_ uses longest common subsequence based scoring to calculate similarity. -Aggregations calculated for this metric: - - mean +def rmse() -> EvaluationMetric: + """ + This function will create a metric for evaluating the square root of `mse`_. -.. _rougeLsum: https://huggingface.co/spaces/evaluate-metric/rouge -""" + This metric computes an aggregate score for the root mean absolute error for regression. -# General Regression Metrics + .. _mse: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html + """ -mae = make_metric( - eval_fn=_mae_eval_fn, - greater_is_better=False, - name="mean_absolute_error", -) -mae.__doc__ = """ -A metric for evaluating `mae`_. + return make_metric( + eval_fn=_rmse_eval_fn, + greater_is_better=False, + name="root_mean_squared_error", + ) -This metric computes an aggregate score for the mean absolute error for regression. -.. _mae: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_error.html -""" +def r2_score() -> EvaluationMetric: + """ + This function will create a metric for evaluating `r2_score`_. -mse = make_metric( - eval_fn=_mse_eval_fn, - greater_is_better=False, - name="mean_squared_error", -) -mse.__doc__ = """ -A metric for evaluating `mse`_. + This metric computes an aggregate score for the coefficient of determination. R2 ranges from + negative infinity to 1, and measures the percentage of variance explained by the predictor + variables in a regression. -This metric computes an aggregate score for the mean squared error for regression. + .. _r2_score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html + """ + return make_metric( + eval_fn=_r2_score_eval_fn, + greater_is_better=True, + name="r2_score", + ) -.. 
_mse: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html -""" -rmse = make_metric( - eval_fn=_rmse_eval_fn, - greater_is_better=False, - name="root_mean_squared_error", -) -rmse.__doc__ = """ -A metric for evaluating the square root of `mse`_. +def max_error() -> EvaluationMetric: + """ + This function will create a metric for evaluating `max_error`_. -This metric computes an aggregate score for the root mean absolute error for regression. + This metric computes an aggregate score for the maximum residual error for regression. -.. _mse: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html -""" + .. _max_error: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.max_error.html + """ + return make_metric( + eval_fn=_max_error_eval_fn, + greater_is_better=False, + name="max_error", + ) -r2_score = make_metric( - eval_fn=_r2_score_eval_fn, - greater_is_better=True, - name="r2_score", -) -r2_score.__doc__ = """ -A metric for evaluating `r2_score`_. -This metric computes an aggregate score for the coefficient of determination. R2 ranges from -negative infinity to 1, and measures the percentage of variance explained by the predictor -variables in a regression. +def mape() -> EvaluationMetric: + """ + This function will create a metric for evaluating `mape`_. -.. _r2_score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.r2_score.html -""" + This metric computes an aggregate score for the mean absolute percentage error for regression. -max_error = make_metric( - eval_fn=_max_error_eval_fn, - greater_is_better=False, - name="max_error", -) -max_error.__doc__ = """ -A metric for evaluating `max_error`_. + .. _mape: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html + """ + return make_metric( + eval_fn=_mape_eval_fn, + greater_is_better=False, + name="mean_absolute_percentage_error", + ) -This metric computes an aggregate score for the maximum residual error for regression. -.. _max_error: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.max_error.html -""" +# Binary Classification Metrics -mape = make_metric( - eval_fn=_mape_eval_fn, - greater_is_better=False, - name="mean_absolute_percentage_error", -) -mape.__doc__ = """ -A metric for evaluating `mape`_. -This metric computes an aggregate score for the mean absolute percentage error for regression. +def recall_score() -> EvaluationMetric: + """ + This function will create a metric for evaluating `recall`_ for classification. -.. _mape: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_absolute_percentage_error.html -""" + This metric computes an aggregate score between 0 and 1 for the recall of a classification task. -# Binary Classification Metrics + .. _recall: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html + """ + return make_metric(eval_fn=_recall_eval_fn, greater_is_better=True, name="recall_score") -recall_score = make_metric(eval_fn=_recall_eval_fn, greater_is_better=True, name="recall_score") -recall_score.__doc__ = """ -A metric for evaluating `recall`_ for classification. -This metric computes an aggregate score between 0 and 1 for the recall of a classification task. +def precision_score() -> EvaluationMetric: + """ + This function will create a metric for evaluating `precision`_ for classification. -.. 
_recall: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html -""" + This metric computes an aggregate score between 0 and 1 for the precision of + classification task. -precision_score = make_metric( - eval_fn=_precision_eval_fn, greater_is_better=True, name="precision_score" -) -precision_score.__doc__ = """ -A metric for evaluating `precision`_ for classification. + .. _precision: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html + """ + return make_metric(eval_fn=_precision_eval_fn, greater_is_better=True, name="precision_score") -This metric computes an aggregate score between 0 and 1 for the precision of -classification task. -.. _precision: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.precision_score.html -""" +def f1_score() -> EvaluationMetric: + """ + This function will create a metric for evaluating `f1_score`_ for binary classification. -f1_score = make_metric(eval_fn=_f1_score_eval_fn, greater_is_better=True, name="f1_score") -f1_score.__doc__ = """ -A metric for evaluating `f1_score`_ for binary classification. + This metric computes an aggregate score between 0 and 1 for the F1 score (F-measure) of a + classification task. F1 score is defined as 2 * (precision * recall) / (precision + recall). -This metric computes an aggregate score between 0 and 1 for the F1 score (F-measure) of a -classification task. F1 score is defined as 2 * (precision * recall) / (precision + recall). + .. _f1_score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html + """ + return make_metric(eval_fn=_f1_score_eval_fn, greater_is_better=True, name="f1_score") -.. _f1_score: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html -""" __all__ = [ "EvaluationExample", diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index e1484bfbf8235..c2b98e20da5f3 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1516,11 +1516,11 @@ def _evaluate( self.builtin_metrics = {} text_metrics = [ - token_count, - toxicity, - perplexity, - flesch_kincaid_grade_level, - ari_grade_level, + token_count(), + toxicity(), + perplexity(), + flesch_kincaid_grade_level(), + ari_grade_level(), ] with mlflow.utils.autologging_utils.disable_autologging(): @@ -1538,9 +1538,15 @@ def _evaluate( if self.model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR): self._compute_builtin_metrics() elif self.model_type == _ModelType.QUESTION_ANSWERING: - self.builtin_metrics = [*text_metrics, exact_match] + self.builtin_metrics = [*text_metrics, exact_match()] elif self.model_type == _ModelType.TEXT_SUMMARIZATION: - self.builtin_metrics = [*text_metrics, rouge1, rouge2, rougeL, rougeLsum] + self.builtin_metrics = [ + *text_metrics, + rouge1(), + rouge2(), + rougeL(), + rougeLsum(), + ] elif self.model_type == _ModelType.TEXT: self.builtin_metrics = text_metrics diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index bf0a11a44dcfe..d1cb00499ede1 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2806,7 +2806,7 @@ def test_evaluate_no_model_type_with_builtin_metric(): results = mlflow.evaluate( model_info.model_uri, data, - extra_metrics=[mlflow.metrics.perplexity], + extra_metrics=[mlflow.metrics.perplexity()], ) assert results.metrics.keys() == { "perplexity/v1/mean", @@ -2878,11 
+2878,11 @@ def test_default_metrics_as_custom_metrics(): targets="truth", model_type="question-answering", custom_metrics=[ - mlflow.metrics.flesch_kincaid_grade_level, - mlflow.metrics.perplexity, - mlflow.metrics.ari_grade_level, - mlflow.metrics.toxicity, - mlflow.metrics.exact_match, + mlflow.metrics.flesch_kincaid_grade_level(), + mlflow.metrics.perplexity(), + mlflow.metrics.ari_grade_level(), + mlflow.metrics.toxicity(), + mlflow.metrics.exact_match(), ], evaluators="default", evaluator_config={ @@ -2910,7 +2910,7 @@ def test_evaluate_with_latency(): data, model_type="text", evaluators="default", - extra_metrics=[mlflow.metrics.latency], + extra_metrics=[mlflow.metrics.latency()], ) client = mlflow.MlflowClient() diff --git a/tests/metrics/test_metric_definitions.py b/tests/metrics/test_metric_definitions.py index 1cb0768f571e6..52acf86f56cbd 100644 --- a/tests/metrics/test_metric_definitions.py +++ b/tests/metrics/test_metric_definitions.py @@ -29,15 +29,15 @@ @pytest.mark.parametrize( "metric", [ - ari_grade_level, - exact_match, - flesch_kincaid_grade_level, - perplexity, - rouge1, - rouge2, - rougeL, - rougeLsum, - toxicity, + ari_grade_level(), + exact_match(), + flesch_kincaid_grade_level(), + perplexity(), + rouge1(), + rouge2(), + rougeL(), + rougeLsum(), + toxicity(), ], ) def test_return_type_and_len_with_target(metric): @@ -61,7 +61,7 @@ def _is_toxic(score): def test_toxicity(): predictions = pd.Series(["A normal sentence", "All women are bad"]) - result = toxicity.eval_fn(predictions, None, {}) + result = toxicity().eval_fn(predictions, None, {}) assert not _is_toxic(result.scores[0]) assert _is_toxic(result.scores[1]) assert result.aggregate_results["ratio"] == 0.5 @@ -72,7 +72,7 @@ def test_toxicity(): def test_perplexity(): predictions = pd.Series(["sentence not", "This is a sentence"]) - result = perplexity.eval_fn(predictions, None, {}) + result = perplexity().eval_fn(predictions, None, {}) # A properly structured sentence should have lower perplexity assert result.scores[0] > result.scores[1] assert result.aggregate_results["mean"] == (result.scores[0] + result.scores[1]) / 2 @@ -90,7 +90,7 @@ def test_flesch_kincaid_grade_level(): ), ] ) - result = flesch_kincaid_grade_level.eval_fn(predictions, None, {}) + result = flesch_kincaid_grade_level().eval_fn(predictions, None, {}) assert result.scores[0] < result.scores[1] assert result.aggregate_results["mean"] == (result.scores[0] + result.scores[1]) / 2 assert result.scores[0] < result.aggregate_results["p90"] < result.scores[1] @@ -107,7 +107,7 @@ def test_ari_grade_level(): ), ] ) - result = ari_grade_level.eval_fn(predictions, None, {}) + result = ari_grade_level().eval_fn(predictions, None, {}) assert result.scores[0] < result.scores[1] assert result.aggregate_results["mean"] == (result.scores[0] + result.scores[1]) / 2 assert result.scores[0] < result.aggregate_results["p90"] < result.scores[1] @@ -118,19 +118,19 @@ def test_exact_match(): predictions = pd.Series(["sentence not", "random text", "a", "c"]) targets = pd.Series(["sentence not", "random text", "a", "c"]) - result = exact_match.eval_fn(predictions, targets, {}) + result = exact_match().eval_fn(predictions, targets, {}) assert result.aggregate_results["exact_match"] == 1.0 predictions = pd.Series(["not sentence", "random text", "b", "c"]) targets = pd.Series(["sentence not", "random text", "a", "c"]) - result = exact_match.eval_fn(predictions, targets, {}) + result = exact_match().eval_fn(predictions, targets, {}) assert 
result.aggregate_results["exact_match"] == 0.5 def test_rouge1(): predictions = pd.Series(["a", "d c"]) targets = pd.Series(["d", "b c"]) - result = rouge1.eval_fn(predictions, targets, {}) + result = rouge1().eval_fn(predictions, targets, {}) assert result.scores[0] == 0.0 assert result.scores[1] == 0.5 assert result.aggregate_results["mean"] == 0.25 @@ -141,7 +141,7 @@ def test_rouge1(): def test_rouge2(): predictions = pd.Series(["a e", "b c e"]) targets = pd.Series(["a e", "b c d"]) - result = rouge2.eval_fn(predictions, targets, {}) + result = rouge2().eval_fn(predictions, targets, {}) assert result.scores[0] == 1.0 assert result.scores[1] == 0.5 assert result.aggregate_results["mean"] == 0.75 @@ -152,7 +152,7 @@ def test_rouge2(): def test_rougeL(): predictions = pd.Series(["a", "b c"]) targets = pd.Series(["d", "b c"]) - result = rougeL.eval_fn(predictions, targets, {}) + result = rougeL().eval_fn(predictions, targets, {}) assert result.scores[0] == 0.0 assert result.scores[1] == 1.0 assert result.aggregate_results["mean"] == 0.5 @@ -163,7 +163,7 @@ def test_rougeL(): def test_rougeLsum(): predictions = pd.Series(["a", "b c"]) targets = pd.Series(["d", "b c"]) - result = rougeLsum.eval_fn(predictions, targets, {}) + result = rougeLsum().eval_fn(predictions, targets, {}) assert result.scores[0] == 0.0 assert result.scores[1] == 1.0 assert result.aggregate_results["mean"] == 0.5 @@ -176,7 +176,7 @@ def test_fails_to_load_metric(): e = ImportError("mocked error") with mock.patch("evaluate.load", side_effect=e) as mock_load: with mock.patch("mlflow.metrics.metric_definitions._logger.warning") as mock_warning: - toxicity.eval_fn(predictions, None, {}) + toxicity().eval_fn(predictions, None, {}) mock_load.assert_called_once_with("toxicity", module_type="measurement") mock_warning.assert_called_once_with( f"Failed to load 'toxicity' metric (error: {e!r}), skipping metric logging.", @@ -186,61 +186,61 @@ def test_fails_to_load_metric(): def test_mae(): predictions = pd.Series([1.0, 2.0, 0.0]) targets = pd.Series([1.0, 2.0, 3.0]) - result = mae.eval_fn(predictions, targets, {}) + result = mae().eval_fn(predictions, targets, {}) assert result.aggregate_results["mean_absolute_error"] == 1.0 def test_mse(): predictions = pd.Series([1.0, 2.0, 0.0]) targets = pd.Series([1.0, 2.0, 3.0]) - result = mse.eval_fn(predictions, targets, {}) + result = mse().eval_fn(predictions, targets, {}) assert result.aggregate_results["mean_squared_error"] == 3.0 def test_rmse(): predictions = pd.Series([4.0, 5.0, 0.0]) targets = pd.Series([1.0, 2.0, 3.0]) - result = rmse.eval_fn(predictions, targets, {}) + result = rmse().eval_fn(predictions, targets, {}) assert result.aggregate_results["root_mean_squared_error"] == 3.0 def test_r2_score(): predictions = pd.Series([1.0, 2.0, 3.0]) targets = pd.Series([3.0, 2.0, 1.0]) - result = r2_score.eval_fn(predictions, targets, {}) + result = r2_score().eval_fn(predictions, targets, {}) assert result.aggregate_results["r2_score"] == -3.0 def test_max_error(): predictions = pd.Series([1.0, 2.0, 3.0]) targets = pd.Series([3.0, 2.0, 1.0]) - result = max_error.eval_fn(predictions, targets, {}) + result = max_error().eval_fn(predictions, targets, {}) assert result.aggregate_results["max_error"] == 2.0 def test_mape_error(): predictions = pd.Series([1.0, 1.0, 1.0]) targets = pd.Series([2.0, 2.0, 2.0]) - result = mape.eval_fn(predictions, targets, {}) + result = mape().eval_fn(predictions, targets, {}) assert result.aggregate_results["mean_absolute_percentage_error"] == 0.5 def 
test_binary_recall_score(): predictions = pd.Series([0, 0, 1, 1, 0, 0, 0, 1]) targets = pd.Series([1, 1, 1, 1, 0, 0, 0, 0]) - result = recall_score.eval_fn(predictions, targets, {}) + result = recall_score().eval_fn(predictions, targets, {}) assert abs(result.aggregate_results["recall_score"] - 0.5) < 1e-3 def test_binary_precision(): predictions = pd.Series([0, 0, 1, 1, 0, 0, 0, 1]) targets = pd.Series([1, 1, 1, 1, 0, 0, 0, 0]) - result = precision_score.eval_fn(predictions, targets, {}) + result = precision_score().eval_fn(predictions, targets, {}) assert abs(result.aggregate_results["precision_score"] == 0.666) < 1e-3 def test_binary_f1_score(): predictions = pd.Series([0, 0, 1, 1, 0, 0, 0, 1]) targets = pd.Series([1, 1, 1, 1, 0, 0, 0, 0]) - result = f1_score.eval_fn(predictions, targets, {}) + result = f1_score().eval_fn(predictions, targets, {}) assert abs(result.aggregate_results["f1_score"] - 0.5713) < 1e-3 From b322f0892d7cf248c51bfc7b6181f0c278a5365f Mon Sep 17 00:00:00 2001 From: Daniel Lok Date: Thu, 19 Oct 2023 13:10:54 +0800 Subject: [PATCH 024/101] Gateway dependency (#9991) Signed-off-by: Daniel Lok --- mlflow/spark/__init__.py | 6 ++---- mlflow/utils/_capture_modules.py | 11 +++++++++- mlflow/utils/requirements_utils.py | 29 +++++++++++++++++++++----- tests/utils/test_requirements_utils.py | 22 +++++++++++++++++++ 4 files changed, 58 insertions(+), 10 deletions(-) diff --git a/mlflow/spark/__init__.py b/mlflow/spark/__init__.py index 199a6b13f1c4f..ddcb51608feb5 100644 --- a/mlflow/spark/__init__.py +++ b/mlflow/spark/__init__.py @@ -94,10 +94,8 @@ def get_default_pip_requirements(is_spark_connect_model=False): # Strip the suffix from `dev` versions of PySpark, which are not # available for installation from Anaconda or PyPI - pyspark_extras = ["connect"] if is_spark_connect_model else None - pyspark_req = re.sub( - r"(\.?)dev.*$", "", _get_pinned_requirement("pyspark", extras=pyspark_extras) - ) + pyspark_req_str = "pyspark[connect]" if is_spark_connect_model else "pyspark" + pyspark_req = re.sub(r"(\.?)dev.*$", "", _get_pinned_requirement(pyspark_req_str)) reqs = [pyspark_req] if Version(pyspark.__version__) < Version("3.4"): # Versions of PySpark < 3.4 are incompatible with pandas >= 2 diff --git a/mlflow/utils/_capture_modules.py b/mlflow/utils/_capture_modules.py index f0e80e0c223b7..c649e50e17add 100644 --- a/mlflow/utils/_capture_modules.py +++ b/mlflow/utils/_capture_modules.py @@ -14,7 +14,10 @@ from mlflow.pyfunc import MAIN from mlflow.utils._spark_utils import _prepare_subprocess_environ_for_creating_local_spark_session from mlflow.utils.file_utils import write_to -from mlflow.utils.requirements_utils import DATABRICKS_MODULES_TO_PACKAGES +from mlflow.utils.requirements_utils import ( + DATABRICKS_MODULES_TO_PACKAGES, + MLFLOW_MODULES_TO_PACKAGES, +) def _get_top_level_module(full_module_name): @@ -79,6 +82,12 @@ def _record_imported_module(self, full_module_name): self.imported_modules.add(databricks_module) return + # special casing for mlflow extras since they may not be required by default + if top_level_module == "mlflow": + if second_level_module in MLFLOW_MODULES_TO_PACKAGES: + self.imported_modules.add(second_level_module) + return + self.imported_modules.add(top_level_module) def __enter__(self): diff --git a/mlflow/utils/requirements_utils.py b/mlflow/utils/requirements_utils.py index 9233e52a7303d..57e1e67324aae 100644 --- a/mlflow/utils/requirements_utils.py +++ b/mlflow/utils/requirements_utils.py @@ -330,6 +330,9 @@ def 
_capture_imported_modules(model_uri, flavor): "databricks.automl_runtime": ["databricks-automl-runtime"], "databricks.model_monitoring": ["databricks-model-monitoring"], } +MLFLOW_MODULES_TO_PACKAGES = { + "mlflow.gateway": ["mlflow[gateway]"], +} _MODULES_TO_PACKAGES = None _PACKAGES_TO_MODULES = None @@ -342,6 +345,9 @@ def _init_modules_to_packages_map(): # https://importlib-metadata.readthedocs.io/en/latest/using.html#using-importlib-metadata _MODULES_TO_PACKAGES = importlib_metadata.packages_distributions() + # Add mapping for MLFlow extras + _MODULES_TO_PACKAGES.update(MLFLOW_MODULES_TO_PACKAGES) + # Multiple packages populate the `databricks` module namespace on Databricks; to avoid # bundling extraneous Databricks packages into model dependencies, we scope each module # to its relevant package @@ -417,7 +423,9 @@ def _infer_requirements(model_uri, flavor): *_MODULES_TO_PACKAGES.get("mlflow", []), ] packages = packages - set(excluded_packages) - unrecognized_packages = packages - _PYPI_PACKAGE_INDEX.package_names + + # manually exclude mlflow[gateway] as it isn't listed separately in PYPI_PACKAGE_INDEX + unrecognized_packages = packages - _PYPI_PACKAGE_INDEX.package_names - {"mlflow[gateway]"} if unrecognized_packages: _logger.warning( "The following packages were not found in the public PyPI package index as of" @@ -426,6 +434,7 @@ def _infer_requirements(model_uri, flavor): _PYPI_PACKAGE_INDEX.date, unrecognized_packages, ) + return sorted(map(_get_pinned_requirement, packages)) @@ -462,18 +471,20 @@ def local(self): return version -def _get_pinned_requirement(package, version=None, module=None, extras=None): +def _get_pinned_requirement(req_str, version=None, module=None): """ Returns a string representing a pinned pip requirement to install the specified package and version (e.g. 'mlflow==1.2.3'). - :param package: The name of the package. + :param req_str: The package requirement string (e.g. "mlflow" or "mlflow[gateway]"). :param version: The version of the package. If None, defaults to the installed version. :param module: The name of the top-level module provided by the package . For example, if `package` is 'scikit-learn', `module` should be 'sklearn'. If None, defaults to `package`. 
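Because `_get_pinned_requirement` now receives a full requirement string, extras survive pinning. A small sketch of the parsing step it relies on (the version shown is illustrative; the real helper looks up the installed version):

    from packaging.requirements import Requirement

    req = Requirement("mlflow[gateway]")
    print(req.name)    # "mlflow"
    print(req.extras)  # {"gateway"}

    installed_version = "2.8.0"  # illustrative only
    pinned = (
        f"{req.name}[{','.join(req.extras)}]=={installed_version}"
        if req.extras
        else f"{req.name}=={installed_version}"
    )
    print(pinned)  # "mlflow[gateway]==2.8.0"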
:param extras: A list of extra names for the package """ + req = Requirement(req_str) + package = req.name if version is None: version_raw = _get_installed_version(package, module) local_version_label = _get_local_version_label(version_raw) @@ -492,8 +503,8 @@ def _get_pinned_requirement(package, version=None, module=None, extras=None): else: version = version_raw - if extras: - return f"{package}[{','.join(extras)}]=={version}" + if req.extras: + return f"{package}[{','.join(req.extras)}]=={version}" return f"{package}=={version}" @@ -533,6 +544,14 @@ def _check_requirement_satisfied(requirement_str): requirement=requirement_str, ) + if pkg_name == "mlflow" and "gateway" in req.extras: + try: + from mlflow import gateway # noqa: F401 + except ModuleNotFoundError: + return _MismatchedPackageInfo( + package_name="mlflow[gateway]", installed_version=None, requirement=requirement_str + ) + if ( pkg_name == "mlflow" and installed_version == mlflow.__version__ diff --git a/tests/utils/test_requirements_utils.py b/tests/utils/test_requirements_utils.py index 1dc2b977a17ec..2098e22fea130 100644 --- a/tests/utils/test_requirements_utils.py +++ b/tests/utils/test_requirements_utils.py @@ -8,6 +8,7 @@ import mlflow import mlflow.utils.requirements_utils +from mlflow.utils.environment import infer_pip_requirements from mlflow.utils.requirements_utils import ( _capture_imported_modules, _get_installed_version, @@ -410,3 +411,24 @@ def predict(self, context, model_input, params=None): captured_modules = _capture_imported_modules(model_info.model_uri, "pyfunc") assert "pandas" in captured_modules assert "sklearn" in captured_modules + + +def test_capture_imported_modules_includes_gateway_extra(): + class MyModel(mlflow.pyfunc.PythonModel): + def predict(self, _, inputs, params=None): + import mlflow.gateway # noqa: F401 + + return inputs + + with mlflow.start_run(): + model_info = mlflow.pyfunc.log_model( + python_model=MyModel(), + artifact_path="test_model", + input_example=([1, 2, 3]), + ) + + captured_modules = _capture_imported_modules(model_info.model_uri, "pyfunc") + assert "mlflow.gateway" in captured_modules + + pip_requirements = infer_pip_requirements(model_info.model_uri, "pyfunc") + assert f"mlflow[gateway]=={mlflow.__version__}" in pip_requirements From 94420d3f2a1d0ef2470b370a4c6edcf7711bb91e Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Thu, 19 Oct 2023 15:46:40 +0900 Subject: [PATCH 025/101] Support fail-fast label (#10006) Signed-off-by: harupy Signed-off-by: Harutaka Kawamura --- .github/workflows/master.yml | 2 +- dev/pytest.sh | 33 +++++++++++++++++++++++++++++++++ mlflow/ml-package-versions.yml | 4 ++-- 3 files changed, 36 insertions(+), 3 deletions(-) create mode 100755 dev/pytest.sh diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 771802be7039d..22e09d7bbd6a3 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -278,7 +278,7 @@ jobs: - name: Run tests run: | export MLFLOW_HOME=$(pwd) - pytest tests/evaluate + dev/pytest.sh tests/evaluate pyfunc: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false diff --git a/dev/pytest.sh b/dev/pytest.sh new file mode 100755 index 0000000000000..a6d8a61b65c41 --- /dev/null +++ b/dev/pytest.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# pytest runner to fail fast if the "fail-fast" label is present on the PR. 
+ +fetch_labels() { + if [ -z $GITHUB_ACTIONS ]; then + echo "" + return + fi + + if [ "$GITHUB_EVENT_NAME" != "pull_request" ]; then + echo "" + return + fi + + PR_DATA=$(cat $GITHUB_EVENT_PATH) + PR_NUMBER=$(echo $PR_DATA | jq --raw-output .pull_request.number) + LABELS=$(curl -s https://api.github.com/repos/$GITHUB_REPOSITORY/issues/$PR_NUMBER/labels | jq --raw-output .[].name) + echo $LABELS +} + +main() { + LABELS=$(fetch_labels) + + if [[ $LABELS == *"fail-fast"* ]]; then + EXTRA_OPTIONS="--exitfirst" + fi + + echo "pytest $EXTRA_OPTIONS ${@:1}" + pytest $EXTRA_OPTIONS "${@:1}" +} + +main "$@" diff --git a/mlflow/ml-package-versions.yml b/mlflow/ml-package-versions.yml index 7aed33623a80e..4fdc2635d3fe1 100644 --- a/mlflow/ml-package-versions.yml +++ b/mlflow/ml-package-versions.yml @@ -126,7 +126,7 @@ xgboost: ">= 0.0.0": ["scikit-learn"] "< 1.6": ["pandas<2"] run: | - pytest tests/xgboost/test_xgboost_model_export.py + dev/pytest.sh tests/xgboost/test_xgboost_model_export.py autologging: minimum: "1.4.2" @@ -135,7 +135,7 @@ xgboost: ">= 0.0.0": ["scikit-learn", "matplotlib"] "< 1.6": ["pandas<2"] run: | - pytest tests/xgboost/test_xgboost_autolog.py + dev/pytest.sh tests/xgboost/test_xgboost_autolog.py lightgbm: package_info: From 20d44536876e9bd898b39e33f3ee3f87b52d2216 Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Wed, 18 Oct 2023 23:49:43 -0700 Subject: [PATCH 026/101] [bug-fix] Fixing target and prediction mapping for eval_fn (#10002) Signed-off-by: Sunish Sheth --- mlflow/models/evaluation/base.py | 2 +- mlflow/models/evaluation/default_evaluator.py | 14 ++- tests/evaluate/test_default_evaluator.py | 99 ++++++++++++++++++- 3 files changed, 105 insertions(+), 10 deletions(-) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 73a29a82beb45..b02c7a232b996 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -655,7 +655,7 @@ def has_predictions(self): @property def predictions_name(self): """ - return targets name + return predictions name """ return self._predictions_name diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index c2b98e20da5f3..86d99e1deebd8 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1138,17 +1138,18 @@ def _get_args_for_metrics(self, extra_metric, eval_df): eval_fn_args.append(copy.deepcopy(self.metrics)) else: for param_name, param in parameters.items(): - if param_name == "predictions": + column = self.col_mapping.get(param_name, param_name) + + if column == "predictions" or column == self.dataset.predictions_name: eval_fn_args.append(eval_df_copy["prediction"]) - elif param_name == "targets": + elif column == "targets" or column == self.dataset.targets_name: if "target" in eval_df_copy: eval_fn_args.append(eval_df_copy["target"]) else: eval_fn_args.append(None) - elif param_name == "metrics": + elif column == "metrics": eval_fn_args.append(copy.deepcopy(self.metrics_values)) else: - column = self.col_mapping.get(param_name, param_name) if not isinstance(column, str): eval_fn_args.append(column) elif column in input_df.columns: @@ -1447,7 +1448,10 @@ def _log_eval_table(self): metric_prefix = "" if self.dataset.has_targets: data = self.dataset.features_data.assign( - **{self.dataset.targets_name or "target": self.y, "outputs": self.y_pred} + **{ + self.dataset.targets_name or "target": self.y, + self.dataset.predictions_name or "outputs": self.y_pred, + } ) else: 
data = self.dataset.features_data.assign(outputs=self.y_pred) diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index d1cb00499ede1..2e8ed1b79a9c3 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -26,6 +26,7 @@ MetricValue, make_metric, ) +from mlflow.metrics.genai import model_utils from mlflow.models import Model from mlflow.models.evaluation.artifacts import ( CsvEvaluationArtifact, @@ -2143,10 +2144,12 @@ def language_model(inputs: list[str]) -> list[str]: return inputs -def validate_question_answering_logged_data(logged_data, with_targets=True): +def validate_question_answering_logged_data( + logged_data, with_targets=True, predictions_name="outputs" +): columns = { "question", - "outputs", + predictions_name, "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", @@ -2159,7 +2162,7 @@ def validate_question_answering_logged_data(logged_data, with_targets=True): assert set(logged_data.columns.tolist()) == columns assert logged_data["question"].tolist() == ["words random", "This is a sentence."] - assert logged_data["outputs"].tolist() == ["words random", "This is a sentence."] + assert logged_data[predictions_name].tolist() == ["words random", "This is a sentence."] assert logged_data["toxicity/v1/score"][0] < 0.5 assert logged_data["toxicity/v1/score"][1] < 0.5 assert logged_data["perplexity/v1/score"][0] > logged_data["perplexity/v1/score"][1] @@ -2260,7 +2263,7 @@ def test_evaluate_question_answering_on_static_dataset_with_targets(): artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] assert "eval_results_table.json" in artifacts logged_data = pd.DataFrame(**results.artifacts["eval_results_table"].content) - validate_question_answering_logged_data(logged_data) + validate_question_answering_logged_data(logged_data, predictions_name="pred") assert set(results.metrics.keys()) == { "toxicity/v1/variance", "perplexity/v1/p90", @@ -2928,3 +2931,91 @@ def test_evaluate_with_latency(): "token_count", } assert all(isinstance(grade, float) for grade in logged_data["latency"]) + + +properly_formatted_openai_response1 = { + "candidates": [ + { + "text": '{\n "score": 3,\n "justification": "' "justification" '"\n}', + "metadata": {"finish_reason": "stop"}, + } + ], + "metadata": { + "input_tokens": 569, + "output_tokens": 93, + "total_tokens": 662, + "model": "gpt-3.5-turbo-0613", + "route_type": "llm/v1/completions", + }, +} + + +def test_evaluate_with_correctness(): + metric = mlflow.metrics.make_genai_metric( + name="correctness", + definition=( + "Correctness refers to how well the generated output matches " + "or aligns with the reference or ground truth text that is considered " + "accurate and appropriate for the given input. The ground truth serves as " + "a benchmark against which the provided output is compared to determine the " + "level of accuracy and fidelity." + ), + grading_prompt=( + "Correctness: If the answer correctly answer the question, below " + "are the details for different scores: " + "- Score 0: the answer is completely incorrect, doesn’t mention anything about " + "the question or is completely contrary to the correct answer. " + "- Score 1: the answer provides some relevance to the question and answer " + "one aspect of the question correctly. " + "- Score 2: the answer mostly answer the question but is missing or hallucinating " + "on one critical aspect. 
" + "- Score 4: the answer correctly answer the question and not missing any " + "major aspect" + ), + examples=[], + version="v1", + model="openai:/gpt-3.5-turbo-16k", + grading_context_columns=["ground_truth"], + parameters={"temperature": 0.0}, + aggregations=["mean", "variance", "p90"], + greater_is_better=True, + ) + + with mock.patch.object( + model_utils, + "score_model_on_payload", + return_value=properly_formatted_openai_response1, + ): + with mlflow.start_run(): + eval_df = pd.DataFrame( + { + "inputs": [ + "What is MLflow?", + "What is Spark?", + "What is Python?", + ], + "ground_truth": [ + "MLflow is an open-source platform", + "Apache Spark is an open-source, distributed computing system", + "Python is a high-level programming language", + ], + "prediction": [ + "MLflow is an open-source platform", + "Apache Spark is an open-source, distributed computing system", + "Python is a high-level programming language", + ], + } + ) + results = mlflow.evaluate( + data=eval_df, + evaluators="default", + targets="ground_truth", + predictions="prediction", + extra_metrics=[metric], + ) + + assert results.metrics == { + "correctness/v1/mean": 3.0, + "correctness/v1/variance": 0.0, + "correctness/v1/p90": 3.0, + } From 9a0985eb5076be2e9eda654b75bb5e9208bdfa98 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Fri, 20 Oct 2023 01:40:17 +0900 Subject: [PATCH 027/101] Cache `evaluate.load("toxicity")` (#10007) Signed-off-by: harupy --- mlflow/metrics/metric_definitions.py | 32 +++++++++++------------- tests/evaluate/test_default_evaluator.py | 5 +++- tests/metrics/test_metric_definitions.py | 4 ++- 3 files changed, 21 insertions(+), 20 deletions(-) diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 54b4d57c0973b..1e3f2aed138ef 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -1,3 +1,4 @@ +import functools import logging import os @@ -53,14 +54,19 @@ def _token_count_eval_fn(predictions, targets, metrics): ) +@functools.lru_cache(maxsize=8) +def _cached_evaluate_load(path, module_type=None): + import evaluate + + return evaluate.load(path, module_type=module_type) + + def _toxicity_eval_fn(predictions, targets, metrics): if not _validate_text_data(predictions, "toxicity", "predictions"): return try: _logger.info("Loading toxicity metric:") - import evaluate - - toxicity = evaluate.load("toxicity", module_type="measurement") + toxicity = _cached_evaluate_load("toxicity", module_type="measurement") except Exception as e: _logger.warning( f"Failed to load 'toxicity' metric (error: {e!r}), skipping metric logging." @@ -87,9 +93,7 @@ def _perplexity_eval_fn(predictions, targets, metrics): try: _logger.info("Loading perplexity metric:") - import evaluate - - perplexity = evaluate.load("perplexity", module_type="metric") + perplexity = _cached_evaluate_load("perplexity", module_type="metric") except Exception as e: _logger.warning( f"Failed to load 'perplexity' metric (error: {e!r}), skipping metric logging." @@ -158,9 +162,7 @@ def _rouge1_eval_fn(predictions, targets, metrics): return try: - import evaluate - - rouge = evaluate.load("rouge") + rouge = _cached_evaluate_load("rouge") except Exception as e: _logger.warning( f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." 
@@ -187,9 +189,7 @@ def _rouge2_eval_fn(predictions, targets, metrics): return try: - import evaluate - - rouge = evaluate.load("rouge") + rouge = _cached_evaluate_load("rouge") except Exception as e: _logger.warning( f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." @@ -216,9 +216,7 @@ def _rougeL_eval_fn(predictions, targets, metrics): return try: - import evaluate - - rouge = evaluate.load("rouge") + rouge = _cached_evaluate_load("rouge") except Exception as e: _logger.warning( f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." @@ -245,9 +243,7 @@ def _rougeLsum_eval_fn(predictions, targets, metrics): return try: - import evaluate - - rouge = evaluate.load("rouge") + rouge = _cached_evaluate_load("rouge") except Exception as e: _logger.warning( f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 2e8ed1b79a9c3..9d2c84788cdf7 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2486,7 +2486,10 @@ def test_evaluate_text_summarization_fails_to_load_evaluate_metrics(): ) data = pd.DataFrame({"text": ["a", "b"], "summary": ["a", "b"]}) - with mock.patch("evaluate.load", side_effect=ImportError("mocked error")) as mock_load: + with mock.patch( + "mlflow.metrics.metric_definitions._cached_evaluate_load", + side_effect=ImportError("mocked error"), + ) as mock_load: results = mlflow.evaluate( model_info.model_uri, data, diff --git a/tests/metrics/test_metric_definitions.py b/tests/metrics/test_metric_definitions.py index 52acf86f56cbd..a1712ed3b801b 100644 --- a/tests/metrics/test_metric_definitions.py +++ b/tests/metrics/test_metric_definitions.py @@ -174,7 +174,9 @@ def test_rougeLsum(): def test_fails_to_load_metric(): predictions = pd.Series(["random text", "This is a sentence"]) e = ImportError("mocked error") - with mock.patch("evaluate.load", side_effect=e) as mock_load: + with mock.patch( + "mlflow.metrics.metric_definitions._cached_evaluate_load", side_effect=e + ) as mock_load: with mock.patch("mlflow.metrics.metric_definitions._logger.warning") as mock_warning: toxicity().eval_fn(predictions, None, {}) mock_load.assert_called_once_with("toxicity", module_type="measurement") From 9666d38eb7b9a97b343d668ebf6830ce5b565a17 Mon Sep 17 00:00:00 2001 From: Liang Zhang Date: Thu, 19 Oct 2023 10:02:20 -0700 Subject: [PATCH 028/101] Update docs for evaluating a static dataset and a function (#10004) Signed-off-by: Liang Zhang --- docs/source/models.rst | 37 ++++++++++++++++++++++++++ docs/source/python_api/mlflow.data.rst | 2 +- mlflow/data/pandas_dataset.py | 8 +++--- 3 files changed, 42 insertions(+), 5 deletions(-) diff --git a/docs/source/models.rst b/docs/source/models.rst index cfa90ec197563..d94dc8f6bbae8 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -3660,6 +3660,43 @@ uses :py:func:`mlflow.evaluate()` with a custom metric function to evaluate the For a more comprehensive custom metrics usage example, refer to `this example from the MLflow GitHub Repository `_. +Evaluating with a Function +^^^^^^^^^^^^^^^^^^^^^^^^^^ +As of MLflow 2.8.0, :py:func:`mlflow.evaluate()` supports evaluating a python function without requiring +logging the model to MLflow. This is useful when you don't want to log the model and just want to evaluate +it. The following example uses :py:func:`mlflow.evaluate()` to evaluate a function: + + +.. 
literalinclude:: ../../examples/evaluation/evaluate_with_function.py + :language: python + +Evaluating with a Static Dataset +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +As of MLflow 2.8.0, :py:func:`mlflow.evaluate()` supports evaluating a static dataset without specifying a model. +This is useful when you save the model output to a column in a Pandas DataFrame or an MLflow PandasDataset, and +want to evaluate the static dataset without re-running the model. + +If you are using a Pandas DataFrame, you must specify the column name that contains the model output using the +top-level ``predictions`` parameter in :py:func:`mlflow.evaluate()`: + +.. code-block:: python + + mlflow.evaluate(data=pandas_df, predictions="model_output", ...) + +If you are using an MLflow PandasDataset, you must specify the column name that contains the model output using +the ``predictions`` parameter in :py:func:`mlflow.data.from_pandas()`, and specify ``None`` for the +``predictions`` parameter in :py:func:`mlflow.evaluate()`: + +.. code-block:: python + + dataset = mlflow.data.from_pandas(pandas_df, predictions="model_output") + mlflow.evaluate(data=pandas_df, predictions=None, ...) + +The following example uses :py:func:`mlflow.evaluate()` to evaluate a static dataset: + +.. literalinclude:: ../../examples/evaluation/evaluate_with_static_dataset.py + :language: python + .. _model-validation: Performing Model Validation diff --git a/docs/source/python_api/mlflow.data.rst b/docs/source/python_api/mlflow.data.rst index 8bd15534f9824..4913528d0cbf9 100644 --- a/docs/source/python_api/mlflow.data.rst +++ b/docs/source/python_api/mlflow.data.rst @@ -6,7 +6,7 @@ runs with MLflow Tracking, as well as retrieve dataset information from runs. It following important interfaces: * :py:class:`Dataset `: Represents a dataset used in model training or - evaluation, including features, targets, and metadata such as the dataset's name, digest (hash) + evaluation, including features, targets, predictions, and metadata such as the dataset's name, digest (hash) schema, profile, and source. You can log this metadata to a run in MLflow Tracking using the :py:func:`mlflow.log_input()` API. ``mlflow.data`` provides APIs for constructing :py:class:`Datasets ` from a variety of Python data objects, including diff --git a/mlflow/data/pandas_dataset.py b/mlflow/data/pandas_dataset.py index d68afbbd6c54f..3c765168d169a 100644 --- a/mlflow/data/pandas_dataset.py +++ b/mlflow/data/pandas_dataset.py @@ -173,7 +173,7 @@ def from_pandas( ) -> PandasDataset: """ Constructs a :py:class:`PandasDataset ` instance from - a Pandas DataFrame, optional targets, and source. + a Pandas DataFrame, optional targets, optional predictions, and source. :param df: A Pandas DataFrame. :param source: The source from which the DataFrame was derived, e.g. 
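Putting the documented pieces together, a hedged end-to-end sketch of static-dataset evaluation could look like the following; the column names and values are illustrative only and are not taken from the bundled example scripts.

import pandas as pd

import mlflow

eval_df = pd.DataFrame(
    {
        "inputs": ["What is MLflow?"],
        "ground_truth": ["MLflow is an open-source platform for the ML lifecycle."],
        "model_output": ["MLflow is an open-source platform for the ML lifecycle."],
    }
)

# The dataset carries both the targets and the pre-computed model outputs.
dataset = mlflow.data.from_pandas(
    eval_df, targets="ground_truth", predictions="model_output"
)

with mlflow.start_run():
    results = mlflow.evaluate(data=dataset, model_type="question-answering")
    print(results.metrics)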
a filesystem @@ -200,10 +200,10 @@ def from_pandas( import pandas as pd x = pd.DataFrame( - [["tom", 10, 1], ["nick", 15, 0], ["juli", 14, 1]], - columns=["Name", "Age", "Label"], + [["tom", 10, 1, 1], ["nick", 15, 0, 1], ["juli", 14, 1, 1]], + columns=["Name", "Age", "Label", "ModelOutput"], ) - dataset = mlflow.data.from_pandas(x, targets="Label") + dataset = mlflow.data.from_pandas(x, targets="Label", predictions="ModelOutput") """ from mlflow.data.code_dataset_source import CodeDatasetSource from mlflow.data.dataset_source_registry import resolve_dataset_source From 155d299229f1b20d686422c8d677e4a7c7b540d4 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Thu, 19 Oct 2023 12:24:53 -0700 Subject: [PATCH 029/101] Improve understandability of EvaluationMetric (#9976) --- mlflow/metrics/genai/genai_metric.py | 53 ++++++++++++---------- mlflow/metrics/genai/prompt_template.py | 3 ++ mlflow/models/evaluation/base.py | 25 +++++++---- tests/metrics/genai/test_genai_metrics.py | 54 ++++++++++++++++------- 4 files changed, 86 insertions(+), 49 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index 2d59d473f8d1e..80d4f2d70c130 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -183,6 +183,30 @@ def make_genai_metric( if aggregations is None: aggregations = ["mean", "variance", "p90"] + class_name = f"mlflow.metrics.genai.prompts.{version}.EvaluationModel" + try: + evaluation_model_class_module = _get_class_from_string(class_name) + except ModuleNotFoundError: + raise MlflowException( + f"Failed to find evaluation model for version {version}." + f"Please check the correctness of the version", + error_code=INVALID_PARAMETER_VALUE, + ) from None + except Exception as e: + raise MlflowException( + f"Failed to construct evaluation model {version}. Error: {e!r}", + error_code=INTERNAL_ERROR, + ) from None + + evaluation_context = evaluation_model_class_module( + name, + definition, + grading_prompt, + examples, + model, + *(parameters,) if parameters is not None else (), + ).to_dict() + def eval_fn( predictions: "pd.Series", metrics: Dict[str, MetricValue], @@ -194,29 +218,6 @@ def eval_fn( """ eval_values = dict(zip(grading_context_columns, args)) - class_name = f"mlflow.metrics.genai.prompts.{version}.EvaluationModel" - try: - evaluation_model_class_module = _get_class_from_string(class_name) - except ModuleNotFoundError: - raise MlflowException( - f"Failed to find evaluation model for version {version}." - f"Please check the correctness of the version", - error_code=INVALID_PARAMETER_VALUE, - ) from None - except Exception as e: - raise MlflowException( - f"Failed to construct evaluation model {version}. 
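With the prompt-version lookup moved out of eval_fn, an invalid version now surfaces when the metric is constructed rather than on the first evaluation. A short sketch of that failure mode; the version string is deliberately bogus and the surrounding values are placeholders.

import mlflow
from mlflow.exceptions import MlflowException

try:
    mlflow.metrics.make_genai_metric(
        name="correctness",
        definition="How well the output matches the ground truth.",
        grading_prompt="Score 1-5 based on factual agreement with the ground truth.",
        version="v-latest",  # not a real prompt version
        model="openai:/gpt-3.5-turbo-16k",
        grading_context_columns=["targets"],
        greater_is_better=True,
    )
except MlflowException as e:
    # Raised immediately: "Failed to find evaluation model for version v-latest ..."
    print(e.message)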
Error: {e!r}", - error_code=INTERNAL_ERROR, - ) from None - - evaluation_context = evaluation_model_class_module( - name, - definition, - grading_prompt, - examples, - model, - *(parameters,) if parameters is not None else (), - ).to_dict() outputs = predictions.to_list() inputs = inputs.to_list() @@ -328,5 +329,9 @@ def aggregate_function(aggregate_option, scores): eval_fn.__signature__ = Signature(signature_parameters) return make_metric( - eval_fn=eval_fn, greater_is_better=greater_is_better, name=name, version=version + eval_fn=eval_fn, + greater_is_better=greater_is_better, + name=name, + version=version, + metric_details=evaluation_context["eval_prompt"].__str__(), ) diff --git a/mlflow/metrics/genai/prompt_template.py b/mlflow/metrics/genai/prompt_template.py index 90870f9c913ff..64e814ab494da 100644 --- a/mlflow/metrics/genai/prompt_template.py +++ b/mlflow/metrics/genai/prompt_template.py @@ -59,3 +59,6 @@ def partial_fill(self, **kwargs: Any) -> "PromptTemplate": new_template_str = self.template_str.format_map(safe_dict) unfilled_variables = [var for var in self.variables if var not in kwargs.keys()] return PromptTemplate(template_str=new_template_str, variables=unfilled_variables) + + def __str__(self): + return self.template_str diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index b02c7a232b996..38867d83c8329 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -109,23 +109,30 @@ def eval_fn( :param long_name: (Optional) The long name of the metric. For example, ``"root_mean_squared_error"`` for ``"mse"``. :param version: (Optional) The metric version. For example ``v1``. + :param metric_details: (Optional) A description of the metric and how it is calculated. ''' - def __init__(self, eval_fn, name, greater_is_better, long_name=None, version=None): + def __init__( + self, eval_fn, name, greater_is_better, long_name=None, version=None, metric_details=None + ): self.eval_fn = eval_fn self.name = name self.greater_is_better = greater_is_better self.long_name = long_name or name self.version = version + self.metric_details = metric_details def __str__(self): + parts = [f"name={self.name}, greater_is_better={self.greater_is_better}"] + if self.long_name: - return ( - f"EvaluationMetric(name={self.name}, long_name={self.long_name}, " - f"greater_is_better={self.greater_is_better})" - ) - else: - return f"EvaluationMetric(name={self.name}, greater_is_better={self.greater_is_better})" + parts.append(f"long_name={self.long_name}") + if self.version: + parts.append(f"version={self.version}") + if self.metric_details: + parts.append(f"metric_details={self.metric_details}") + + return "EvaluationMetric(" + ", ".join(parts) + ")" def make_metric( @@ -135,6 +142,7 @@ def make_metric( name=None, long_name=None, version=None, + metric_details=None, ): ''' A factory function to create an :py:class:`EvaluationMetric` object. @@ -175,6 +183,7 @@ def eval_fn( :param long_name: (Optional) The long name of the metric. For example, ``"mean_squared_error"`` for ``"mse"``. :param version: (Optional) The metric version. For example ``v1``. + :param metric_details: (Optional) A description of the metric and how it is calculated. .. 
seealso:: @@ -194,7 +203,7 @@ def eval_fn( ) name = eval_fn.__name__ - return EvaluationMetric(eval_fn, name, greater_is_better, long_name, version) + return EvaluationMetric(eval_fn, name, greater_is_better, long_name, version, metric_details) @developer_stable diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 5d94fa3398e8d..a61e556db2cd4 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -378,18 +378,6 @@ def test_make_genai_metric_failure(): ) import pandas as pd - custom_metric1 = make_genai_metric( - name="correctness", - version="v-latest", - definition="definition", - grading_prompt="grading_prompt", - examples=[example], - model="model", - grading_context_columns=["targets"], - parameters={"temperature": 0.0}, - greater_is_better=True, - aggregations=["mean"], - ) with pytest.raises( MlflowException, match=re.escape( @@ -397,11 +385,17 @@ def test_make_genai_metric_failure(): "Please check the correctness of the version" ), ): - custom_metric1.eval_fn( - pd.Series(["predictions"]), - {}, - pd.Series(["What is MLflow?"]), - pd.Series(["truth"]), + make_genai_metric( + name="correctness", + version="v-latest", + definition="definition", + grading_prompt="grading_prompt", + examples=[example], + model="model", + grading_context_columns=["targets"], + parameters={"temperature": 0.0}, + greater_is_better=True, + aggregations=["mean"], ) with mock.patch.object( @@ -696,3 +690,29 @@ def test_strict_correctness_metric(): match="Failed to find strict correctness metric for version non-existent-version", ): strict_correctness_metric = strict_correctness(metric_version="non-existent-version") + + +def test_make_genai_metric_metric_details(): + custom_metric = make_genai_metric( + name="correctness", + version="v1", + definition=example_definition, + grading_prompt=example_grading_prompt, + examples=[mlflow_example], + model="gateway:/gpt-3.5-turbo", + grading_context_columns=["targets"], + parameters={"temperature": 0.0}, + greater_is_better=True, + aggregations=["mean", "variance", "p90"], + ) + + # pylint: disable=line-too-long + expected_metric_details = "\nTask:\nYou are an impartial judge. You will be given an input that was sent to a machine\nlearning model, and you will be given an output that the model produced. You\nmay also be given additional information that was used by the model to generate the output.\n\nYour task is to determine a numerical score called correctness based on the input and output.\nA definition of correctness and a grading rubric are provided below.\nYou must use the grading rubric to determine your score. You must also justify your score.\n\nExamples could be included below for reference. Make sure to use them as references and to\nunderstand them before completing the task.\n\nInput:\n{input}\n\nOutput:\n{output}\n\n{grading_context_columns}\n\nMetric definition:\nCorrectness refers to how well the generated output matches or aligns with the reference or ground truth text that is considered accurate and appropriate for the given input. The ground truth serves as a benchmark against which the provided output is compared to determine the level of accuracy and fidelity.\n\nGrading rubric:\nCorrectness: If the answer correctly answer the question, below are the details for different scores: - Score 0: the answer is completely incorrect, doesn’t mention anything about the question or is completely contrary to the correct answer. 
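A brief sketch of the new metric_details plumbing: make_metric forwards it into EvaluationMetric, and the rebuilt __str__ only prints the optional fields that are actually set. The metric below is illustrative, and importing make_metric from mlflow.metrics mirrors the accompanying tests.

from mlflow.metrics import make_metric


def exact_match_fraction(predictions, targets):
    return float((predictions == targets).mean())


metric = make_metric(
    eval_fn=exact_match_fraction,
    greater_is_better=True,
    version="v1",
    metric_details="Fraction of rows whose prediction equals the target.",
)

# EvaluationMetric(name=exact_match_fraction, greater_is_better=True,
#                  long_name=exact_match_fraction, version=v1,
#                  metric_details=Fraction of rows whose prediction equals the target.)
print(metric)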
- Score 1: the answer provides some relevance to the question and answer one aspect of the question correctly. - Score 2: the answer mostly answer the question but is missing or hallucinating on one critical aspect. - Score 4: the answer correctly answer the question and not missing any major aspect\n\nExamples:\n\nInput:\nWhat is MLflow?\n\nOutput:\nMLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n\nAdditional information used by the model:\nkey: targets\nvalue:\nMLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n\nscore: 4\njustification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n \n\nYou must return the following fields in your response one below the other:\nscore: Your numerical score for the model's correctness based on the rubric\njustification: Your step-by-step reasoning about the model's correctness score\n " + + assert custom_metric.metric_details == expected_metric_details + + assert ( + custom_metric.__str__() + == f"EvaluationMetric(name=correctness, greater_is_better=True, long_name=correctness, version=v1, metric_details={expected_metric_details})" + ) + # pylint: enable=line-too-long From ae836587b01809113a2c7cfaf828890f845af85d Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Thu, 19 Oct 2023 13:34:03 -0700 Subject: [PATCH 030/101] Adding type checking for extra_metrics in mlflow.evaluate (#10017) Signed-off-by: Sunish Sheth --- mlflow/models/evaluation/base.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 38867d83c8329..88a40b34c1dfe 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -14,7 +14,7 @@ from contextlib import contextmanager from decimal import Decimal from types import FunctionType -from typing import Any, Dict, Optional +from typing import Any, Dict, List, Optional import mlflow from mlflow.data.dataset import Dataset @@ -1164,7 +1164,7 @@ def evaluate( evaluators=None, evaluator_config=None, custom_metrics=None, - extra_metrics=None, + extra_metrics: Optional[List[EvaluationMetric]] = None, custom_artifacts=None, validation_thresholds=None, baseline_model=None, From c8926d0a230e164eff8773f28e3357f2e82f3d9e Mon Sep 17 00:00:00 2001 From: Liang Zhang Date: Thu, 19 Oct 2023 14:19:54 -0700 Subject: [PATCH 031/101] Fix latency and predictions parameter (#9983) Signed-off-by: Liang Zhang --- mlflow/models/evaluation/base.py | 70 ++++++++--- mlflow/models/evaluation/default_evaluator.py | 83 ++++++++---- tests/evaluate/test_default_evaluator.py | 119 ++++++++++++++++-- tests/evaluate/test_evaluation.py | 40 ++++-- 4 files changed, 255 insertions(+), 57 deletions(-) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 88a40b34c1dfe..9a32d2af0b4d7 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -778,6 +778,7 @@ def evaluate( extra_metrics=None, custom_artifacts=None, baseline_model=None, + predictions=None, **kwargs, ): """ @@ -802,6 +803,9 
@@ def evaluate( flavor as a baseline model to be compared with the candidate model (specified by the `model` param) for model validation. (pyfunc model instance is not allowed) + :param predictions: The column name of the model output column that is used for evaluation. + This is only used when a model returns a pandas dataframe that contains + multiple columns. :return: A :py:class:`mlflow.models.EvaluationResult` instance containing evaluation metrics for candidate model and baseline model and artifacts for candidate model. @@ -1075,6 +1079,7 @@ def _evaluate( extra_metrics, custom_artifacts, baseline_model, + predictions, ): """ The public API "evaluate" will verify argument first, and then pass normalized arguments @@ -1116,6 +1121,7 @@ def _evaluate( extra_metrics=extra_metrics, custom_artifacts=custom_artifacts, baseline_model=baseline_model, + predictions=predictions, ) eval_results.append(eval_result) @@ -1437,8 +1443,32 @@ def fn(model_input): ``data`` is a :py:class:`mlflow.data.dataset.Dataset` that defines targets, then ``targets`` is optional. - :param predictions: Optional. Only used when ``model`` is not specified and ``data`` is a pandas - dataframe. The name of the column in ``data`` that contains model outputs. + :param predictions: Optional. The name of the column that contains model outputs. There are two + cases where this argument is required: + + - When ``model`` is specified and outputs multiple columns. The + ``predictions`` should be the name of the column that is used for + evaluation. + - When ``model`` is not specified and ``data`` is a pandas dataframe. The + ``predictions`` should be the name of the column in ``data`` that + contains model outputs. + + .. code-block:: python + :caption: Example usage of predictions + + # Evaluate a model that outputs multiple columns + data = pd.DataFrame({"question": ["foo"]}) + + + def model(inputs): + return pd.DataFrame({"answer": ["bar"], "source": ["baz"]}) + + + results = evalaute(model=model, data=data, predictions="answer", ...) + + # Evaluate a static dataset + data = pd.DataFrame({"question": ["foo"], "answer": ["bar"], "source": ["baz"]}) + results = evalaute(data=data, predictions="answer", ...) :param model_type: (Optional) A string describing the model type. The default evaluator supports the following model types: @@ -1642,16 +1672,10 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): message="The data argument cannot be None.", error_code=INVALID_PARAMETER_VALUE ) - if predictions is not None and model is not None: - raise MlflowException( - message="The predictions argument cannot be specified when model is specified.", - error_code=INVALID_PARAMETER_VALUE, - ) - _EnvManager.validate(env_manager) - # If Dataset is provided, the targets and predictions can only be specified by the Dataset, - # not the targets and predictions parameters of the mlflow.evaluate() API. + # If Dataset is provided, the targets can only be specified by the Dataset, + # not the targets parameters of the mlflow.evaluate() API. 
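A runnable variant of the docstring example above: a callable model returns two columns and predictions selects the one that gets scored. The column names and the model body are illustrative assumptions.

import pandas as pd

import mlflow

data = pd.DataFrame({"question": ["What is MLflow?"]})


def qa_model(inputs):
    # Two output columns; only "answer" is evaluated, "source" stays as extra output.
    return pd.DataFrame({"answer": ["An open-source ML platform."], "source": ["docs"]})


with mlflow.start_run():
    results = mlflow.evaluate(
        model=qa_model,
        data=data,
        predictions="answer",
        model_type="text",
    )
    print(results.metrics)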
if isinstance(data, Dataset) and targets is not None: raise MlflowException( message="The top-level targets parameter should not be specified since a Dataset " @@ -1660,7 +1684,9 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): "Meanwhile, please specify `mlflow.evaluate(..., targets=None, ...)`.", error_code=INVALID_PARAMETER_VALUE, ) - if isinstance(data, Dataset) and predictions is not None: + # If Dataset is provided and model is None, then the predictions can only be specified by the + # Dataset, not the predictions parameters of the mlflow.evaluate() API. + if isinstance(data, Dataset) and model is None and predictions is not None: raise MlflowException( message="The top-level predictions parameter should not be specified since a Dataset " "is used. Please only specify the predictions column name in the Dataset. For example:" @@ -1668,6 +1694,17 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): "Meanwhile, please specify `mlflow.evaluate(..., predictions=None, ...)`.", error_code=INVALID_PARAMETER_VALUE, ) + # If Dataset is provided and model is specified, then the data.predictions cannot be specified. + if ( + isinstance(data, Dataset) + and model is not None + and getattr(data, "predictions", None) is not None + ): + raise MlflowException( + message="The predictions parameter should not be specified in the Dataset since a " + "model is specified. Please remove the predictions column from the Dataset.", + error_code=INVALID_PARAMETER_VALUE, + ) if model_type in [_ModelType.REGRESSOR, _ModelType.CLASSIFIER]: if isinstance(data, Dataset): @@ -1788,9 +1825,12 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): with _start_run_or_reuse_active_run() as run_id: if not isinstance(data, Dataset): # Convert data to `mlflow.data.dataset.Dataset`. - data = _convert_data_to_mlflow_dataset( - data=data, targets=targets, predictions=predictions - ) + if model is None: + data = _convert_data_to_mlflow_dataset( + data=data, targets=targets, predictions=predictions + ) + else: + data = _convert_data_to_mlflow_dataset(data=data, targets=targets) from mlflow.data.pyfunc_dataset_mixin import PyFuncConvertibleDatasetMixin @@ -1811,6 +1851,7 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): path=dataset_path, feature_names=feature_names, ) + predictions_expected_in_model_output = predictions if model is not None else None try: evaluate_result = _evaluate( @@ -1824,6 +1865,7 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): extra_metrics=extra_metrics, custom_artifacts=custom_artifacts, baseline_model=baseline_model, + predictions=predictions_expected_in_model_output, ) finally: if isinstance(model, _ServedPyFuncModel): diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 86d99e1deebd8..35c55b034461d 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -61,7 +61,6 @@ _DEFAULT_SAMPLE_ROWS_FOR_SHAP = 2000 _EVAL_TABLE_FILE_NAME = "eval_results_table.json" -_Y_PREDICTED_OUTPUT_COLUMN_NAME = "predicted_column" _TOKEN_COUNT_METRIC_NAME = "token_count" _LATENCY_METRIC_NAME = "latency" @@ -444,6 +443,10 @@ def _get_aggregate_metrics_values(metrics): def _extract_output_and_other_columns(model_predictions, output_column_name): y_pred = None other_output_columns = None + ERROR_MISSING_OUTPUT_COLUMN_NAME = ( + "Output column name is not specified for the multi-output model. 
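The new validation can be seen end to end with a tiny sketch: a dataset that already carries a predictions column is rejected as soon as a model is also supplied. Column names and the toy model are made up for illustration.

import pandas as pd

import mlflow
from mlflow.exceptions import MlflowException

df = pd.DataFrame({"x": [1.0, 2.0], "y": [2.0, 4.0], "model_output": [2.1, 3.9]})
dataset = mlflow.data.from_pandas(df, targets="y", predictions="model_output")


def regressor(inputs):
    return inputs["x"] * 2


try:
    mlflow.evaluate(model=regressor, data=dataset, model_type="regressor")
except MlflowException as e:
    # "The predictions parameter should not be specified in the Dataset since a
    # model is specified. Please remove the predictions column from the Dataset."
    print(e.message)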
" + "Please set the correct output column name using the `predictions` parameter." + ) if isinstance(model_predictions, list) and all(isinstance(p, dict) for p in model_predictions): # Extract 'y_pred' and 'other_output_columns' from list of dictionaries @@ -455,20 +458,32 @@ def _extract_output_and_other_columns(model_predictions, output_column_name): [{k: v for k, v in p.items() if k != output_column_name} for p in model_predictions] ) elif len(model_predictions) > 1: + if output_column_name is None: + raise MlflowException( + ERROR_MISSING_OUTPUT_COLUMN_NAME, + error_code=INVALID_PARAMETER_VALUE, + ) raise MlflowException( f"Output column name '{output_column_name}' is not found in the model " f"predictions list: {model_predictions}. Please set the correct output column " - "name using the `predicted_column` parameter in evaluator config." + "name using the `predictions` parameter.", + error_code=INVALID_PARAMETER_VALUE, ) elif isinstance(model_predictions, pd.DataFrame): if output_column_name in model_predictions.columns: y_pred = model_predictions[output_column_name] other_output_columns = model_predictions.drop(columns=output_column_name) elif model_predictions.shape[1] > 1: + if output_column_name is None: + raise MlflowException( + ERROR_MISSING_OUTPUT_COLUMN_NAME, + error_code=INVALID_PARAMETER_VALUE, + ) raise MlflowException( f"Output column name '{output_column_name}' is not found in the model " f"predictions dataframe {model_predictions.columns}. Please set the correct " - "output column name using the `predicted_column` parameter in evaluator config." + "output column name using the `predictions` parameter.", + error_code=INVALID_PARAMETER_VALUE, ) elif isinstance(model_predictions, dict): if output_column_name in model_predictions: @@ -477,10 +492,16 @@ def _extract_output_and_other_columns(model_predictions, output_column_name): {k: v for k, v in model_predictions.items() if k != output_column_name} ) elif len(model_predictions) > 1: + if output_column_name is None: + raise MlflowException( + ERROR_MISSING_OUTPUT_COLUMN_NAME, + error_code=INVALID_PARAMETER_VALUE, + ) raise MlflowException( f"Output column name '{output_column_name}' is not found in the " f"model predictions dict {model_predictions}. Please set the correct " - "output column name using the `predicted_column` parameter in evaluator config." 
+ "output column name using the `predictions` parameter.", + error_code=INVALID_PARAMETER_VALUE, ) return y_pred if y_pred is not None else model_predictions, other_output_columns @@ -1160,23 +1181,27 @@ def _get_args_for_metrics(self, extra_metric, eval_df): ): eval_fn_args.append(self.other_output_columns[column]) elif param.default == inspect.Parameter.empty: - output_column_name = self.evaluator_config.get( - _Y_PREDICTED_OUTPUT_COLUMN_NAME, "output" - ) + output_column_name = self.predictions if self.other_output_columns: output_columns = list(self.other_output_columns.columns) else: output_columns = [] input_columns = list(input_df.columns) + msg_output_columns = ( + ( + "Note that this does not include the output column: " + f"'{output_column_name}'\n\n" + ) + if output_column_name is not None + else "" + ) raise MlflowException( "Error: Metric Calculation Failed\n" f"Metric '{extra_metric.name}' requires the column '{param_name}' to " "be defined in either the input data or resulting output data.\n\n" "Below are the existing column names for the input/output data:\n" f"Input Columns: {input_columns}\n" - f"Output Columns: {output_columns}\n" - "Note that this does not include the output column: " - f"'{output_column_name}'\n\n" + f"Output Columns: {output_columns}\n{msg_output_columns}" f"To resolve this issue, you may want to map {param_name} to an " "existing column using the following configuration:\n" f"evaluator_config={{'col_mapping': {{'{param_name}': " @@ -1311,20 +1336,28 @@ def predict_with_latency(X_copy): ) X_copy = self.X.copy_to_avoid_mutation() - if compute_latency: - model_predictions = predict_with_latency(X_copy) - else: - if self.model is not None: - model_predictions = self.model.predict(X_copy) + if self.model is not None: + if compute_latency: + model_predictions = predict_with_latency(X_copy) else: - if self.dataset.predictions_data is None: - raise MlflowException( - message="Predictions data is missing when model is not provided. " - "Please provide predictions data in the pandas dataset or provide " - "a model.", - error_code=INVALID_PARAMETER_VALUE, - ) - model_predictions = self.dataset.predictions_data + model_predictions = self.model.predict(X_copy) + else: + if self.dataset.predictions_data is None: + raise MlflowException( + message="Predictions data is missing when model is not provided. " + "Please provide predictions data in a dataset or provide a model. " + "See the documentation for mlflow.evaluate() for how to specify " + "the predictions data in a dataset.", + error_code=INVALID_PARAMETER_VALUE, + ) + if compute_latency: + _logger.warning( + "Setting the latency to 0 for all entries because the model " "is not provided." 
+ ) + self.metrics_values.update( + {_LATENCY_METRIC_NAME: MetricValue(scores=[0.0] * len(X_copy))} + ) + model_predictions = self.dataset.predictions_data if self.model_type == _ModelType.CLASSIFIER: self.label_list = np.unique(self.y) @@ -1359,7 +1392,7 @@ def predict_with_latency(X_copy): else: self.y_probs = None - output_column_name = self.evaluator_config.get(_Y_PREDICTED_OUTPUT_COLUMN_NAME, "output") + output_column_name = self.predictions self.y_pred, self.other_output_columns = _extract_output_and_other_columns( model_predictions, output_column_name ) @@ -1602,6 +1635,7 @@ def evaluate( extra_metrics=None, custom_artifacts=None, baseline_model=None, + predictions=None, **kwargs, ): self.dataset = dataset @@ -1612,6 +1646,7 @@ def evaluate( self.custom_artifacts = custom_artifacts self.y = dataset.labels_data + self.predictions = predictions self.col_mapping = self.evaluator_config.get("col_mapping", {}) self.pos_label = self.evaluator_config.get("pos_label") self.sample_weights = self.evaluator_config.get("sample_weights") diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 9d2c84788cdf7..95301616a8633 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2763,6 +2763,7 @@ def test_eval_df(predictions, targets, metrics, inputs, truth, context): model_info.model_uri, data, targets="targets", + predictions="output", model_type="text", extra_metrics=[make_metric(eval_fn=test_eval_df, greater_is_better=True)], custom_artifacts=[example_custom_artifact], @@ -2862,26 +2863,65 @@ def word_count_eval(predictions, targets, metrics): ] -def identity_model(inputs): - return inputs +def multi_output_model(inputs): + return pd.DataFrame( + { + "answer": ["words random", "This is a sentence."], + "source": ["words random", "This is a sentence."], + } + ) def test_default_metrics_as_custom_metrics(): with mlflow.start_run() as run: model_info = mlflow.pyfunc.log_model( - artifact_path="model", python_model=identity_model, input_example=["a", "b"] + artifact_path="model", python_model=multi_output_model, input_example=["a"] ) data = pd.DataFrame( { "question": ["words random", "This is a sentence."], "truth": ["words random", "This is a sentence."], - "answer": ["words random", "This is a sentence."], } ) results = evaluate( model_info.model_uri, data, targets="truth", + predictions="answer", + model_type="question-answering", + custom_metrics=[ + mlflow.metrics.flesch_kincaid_grade_level(), + mlflow.metrics.perplexity(), + mlflow.metrics.ari_grade_level(), + mlflow.metrics.toxicity(), + mlflow.metrics.exact_match(), + ], + evaluators="default", + ) + + client = mlflow.MlflowClient() + artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] + assert "eval_results_table.json" in artifacts + for metric in ["toxicity", "perplexity", "ari_grade_level", "flesch_kincaid_grade_level"]: + for measure in ["mean", "p90", "variance"]: + assert f"{metric}/v1/{measure}" in results.metrics.keys() + assert "exact_match/v1" in results.metrics.keys() + + +def test_default_metrics_as_custom_metrics_static_dataset(): + with mlflow.start_run() as run: + data = pd.DataFrame( + { + "question": ["words random", "This is a sentence."], + "truth": ["words random", "This is a sentence."], + "answer": ["words random", "This is a sentence."], + "source": ["words random", "This is a sentence."], + } + ) + results = evaluate( + data=data, + targets="truth", + predictions="answer", model_type="question-answering", 
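Requesting latency against a static dataset is now a warning plus per-row zeros rather than a failure. A compact sketch of what that looks like from the caller's side; the column names are illustrative.

import pandas as pd

import mlflow

data = pd.DataFrame({"text": ["foo", "bar"], "model_output": ["FOO", "BAR"]})

with mlflow.start_run():
    results = mlflow.evaluate(
        data=data,
        predictions="model_output",
        model_type="text",
        extra_metrics=[mlflow.metrics.latency()],
    )

logged = pd.DataFrame(**results.artifacts["eval_results_table"].content)
print(logged["latency"].tolist())  # [0.0, 0.0] -- no model to time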
custom_metrics=[ mlflow.metrics.flesch_kincaid_grade_level(), @@ -2891,9 +2931,6 @@ def test_default_metrics_as_custom_metrics(): mlflow.metrics.exact_match(), ], evaluators="default", - evaluator_config={ - "predicted_column": "answer", - }, ) client = mlflow.MlflowClient() @@ -2905,6 +2942,37 @@ def test_default_metrics_as_custom_metrics(): assert "exact_match/v1" in results.metrics.keys() +def test_multi_output_model_error_handling(): + with mlflow.start_run(): + model_info = mlflow.pyfunc.log_model( + artifact_path="model", python_model=multi_output_model, input_example=["a"] + ) + data = pd.DataFrame( + { + "question": ["words random", "This is a sentence."], + "truth": ["words random", "This is a sentence."], + } + ) + with pytest.raises( + MlflowException, + match="Output column name is not specified for the multi-output model.", + ): + evaluate( + model_info.model_uri, + data, + targets="truth", + model_type="question-answering", + custom_metrics=[ + mlflow.metrics.flesch_kincaid_grade_level(), + mlflow.metrics.perplexity(), + mlflow.metrics.ari_grade_level(), + mlflow.metrics.toxicity(), + mlflow.metrics.exact_match(), + ], + evaluators="default", + ) + + def test_evaluate_with_latency(): with mlflow.start_run() as run: model_info = mlflow.pyfunc.log_model( @@ -2936,6 +3004,43 @@ def test_evaluate_with_latency(): assert all(isinstance(grade, float) for grade in logged_data["latency"]) +def test_evaluate_with_latency_static_dataset(): + with mlflow.start_run() as run: + mlflow.pyfunc.log_model( + artifact_path="model", python_model=language_model, input_example=["a", "b"] + ) + data = pd.DataFrame( + { + "text": ["foo", "bar"], + "model_output": ["FOO", "BAR"], + } + ) + results = mlflow.evaluate( + data=data, + model_type="text", + evaluators="default", + predictions="model_output", + extra_metrics=[mlflow.metrics.latency()], + ) + + client = mlflow.MlflowClient() + artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] + assert "eval_results_table.json" in artifacts + logged_data = pd.DataFrame(**results.artifacts["eval_results_table"].content) + assert set(logged_data.columns.tolist()) == { + "text", + "outputs", + "toxicity/v1/score", + "flesch_kincaid_grade_level/v1/score", + "ari_grade_level/v1/score", + "perplexity/v1/score", + "latency", + "token_count", + } + assert all(isinstance(grade, float) for grade in logged_data["latency"]) + assert all(grade == 0.0 for grade in logged_data["latency"]) + + properly_formatted_openai_response1 = { "candidates": [ { diff --git a/tests/evaluate/test_evaluation.py b/tests/evaluate/test_evaluation.py index 8b61f979fe433..5b2c7a1fc20d4 100644 --- a/tests/evaluate/test_evaluation.py +++ b/tests/evaluate/test_evaluation.py @@ -910,6 +910,7 @@ def test_evaluator_evaluation_interface(multiclass_logistic_regressor_model_uri, extra_metrics=None, custom_artifacts=None, baseline_model=None, + predictions=None, ) @@ -991,6 +992,7 @@ def get_evaluate_call_arg(model, evaluator_config): "custom_metrics": None, "custom_artifacts": None, "baseline_model": baseline_model, + "predictions": None, } # evaluators = None is the case evaluators unspecified, it should fetch all registered @@ -1370,6 +1372,32 @@ def test_evaluate_with_targets_error_handling(): ) +def test_evaluate_with_predictions_error_handling(): + import lightgbm as lgb + + X, y = sklearn.datasets.load_diabetes(return_X_y=True, as_frame=True) + X = X[::5] + y = y[::5] + lgb_data = lgb.Dataset(X, label=y) + model = lgb.train({"objective": "regression"}, lgb_data, 
num_boost_round=5) + mlflow_dataset_with_predictions = mlflow.data.from_pandas( + df=X.assign(y=y, model_output=y), + targets="y", + predictions="model_output", + ) + with mlflow.start_run(): + with pytest.raises( + MlflowException, + match="The predictions parameter should not be specified in the Dataset since a model " + "is specified. Please remove the predictions column from the Dataset.", + ): + mlflow.evaluate( + model=model, + data=mlflow_dataset_with_predictions, + model_type="regressor", + ) + + def test_evaluate_with_function_input_single_output(): import lightgbm as lgb @@ -1509,18 +1537,6 @@ def test_evaluate_with_static_dataset_error_handling_pandas_dataframe(): model_type="regressor", ) - with pytest.raises( - MlflowException, - match="The predictions argument cannot be specified when model is specified.", - ): - mlflow.evaluate( - model="models:/test", - data=X.assign(y=y, model_output=y).to_numpy(), - targets="y", - predictions="model_output", - model_type="regressor", - ) - with pytest.raises(MlflowException, match="The data argument cannot be None."): mlflow.evaluate( data=None, From cd2b3febf5011a4cfcdadb81f56855fc02ad77d9 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Thu, 19 Oct 2023 16:17:26 -0700 Subject: [PATCH 032/101] Fail fast for mlflow.evaluate() (#9929) Signed-off-by: Ann Zhang --- mlflow/metrics/genai/genai_metric.py | 24 +++- mlflow/metrics/metric_definitions.py | 8 -- mlflow/models/evaluation/default_evaluator.py | 134 ++++++++++++------ tests/evaluate/test_default_evaluator.py | 62 ++++++-- tests/metrics/genai/test_genai_metrics.py | 17 ++- tests/metrics/test_metric_definitions.py | 4 + 6 files changed, 175 insertions(+), 74 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index 80d4f2d70c130..c763ddbf5f009 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -84,9 +84,9 @@ def make_genai_metric( examples: Optional[List[EvaluationExample]] = None, version: Optional[str] = _get_latest_metric_version(), model: Optional[str] = "openai:/gpt-3.5-turbo-16k", - grading_context_columns: Optional[List[str]] = None, + grading_context_columns: Optional[List[str]] = [], # noqa: B006 parameters: Optional[Dict[str, Any]] = None, - aggregations: Optional[List[str]] = None, + aggregations: Optional[List[str]] = ["mean", "variance", "p90"], # noqa: B006 greater_is_better: bool = True, max_workers: int = 10, judge_request_timeout: int = 60, @@ -180,9 +180,6 @@ def make_genai_metric( ) """ - if aggregations is None: - aggregations = ["mean", "variance", "p90"] - class_name = f"mlflow.metrics.genai.prompts.{version}.EvaluationModel" try: evaluation_model_class_module = _get_class_from_string(class_name) @@ -216,7 +213,6 @@ def eval_fn( """ This is the function that is called when the metric is evaluated. 
""" - eval_values = dict(zip(grading_context_columns, args)) outputs = predictions.to_list() @@ -244,7 +240,21 @@ def score_model_on_one_payload( eval_parameters, eval_model, ): - arg_string = _format_args_string(grading_context_columns, eval_values, indx) + try: + arg_string = _format_args_string(grading_context_columns, eval_values, indx) + except Exception as e: + raise MlflowException( + f"Values for grading_context_columns are malformed and cannot be " + f"formatted into a prompt for metric '{name}'.\n" + f"Required columns: {grading_context_columns}\n" + f"Values: {eval_values}\n" + f"Error: {e!r}\n" + f"Please check the following: \n" + "- predictions and targets (if required) are provided correctly\n" + "- grading_context_columns are mapped correctly using the evaluator_config " + "parameter\n" + "- input and output data are formatted correctly." + ) payload = { "prompt": evaluation_context["eval_prompt"].format( input=input, output=output, grading_context_columns=arg_string diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 1e3f2aed138ef..46a3937e77c91 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -40,8 +40,6 @@ def _token_count_eval_fn(predictions, targets, metrics): os.environ["TIKTOKEN_CACHE_DIR"] = "" encoding = tiktoken.get_encoding("cl100k_base") - _logger.info("Computing token count metric:") - num_tokens = [] for prediction in predictions: if isinstance(prediction, str): @@ -65,7 +63,6 @@ def _toxicity_eval_fn(predictions, targets, metrics): if not _validate_text_data(predictions, "toxicity", "predictions"): return try: - _logger.info("Loading toxicity metric:") toxicity = _cached_evaluate_load("toxicity", module_type="measurement") except Exception as e: _logger.warning( @@ -73,7 +70,6 @@ def _toxicity_eval_fn(predictions, targets, metrics): ) return - _logger.info("Computing toxicity metric:") scores = toxicity.compute(predictions=predictions)["toxicity"] toxicity_ratio = toxicity.compute(predictions=predictions, aggregation="ratio")[ "toxicity_ratio" @@ -92,7 +88,6 @@ def _perplexity_eval_fn(predictions, targets, metrics): return try: - _logger.info("Loading perplexity metric:") perplexity = _cached_evaluate_load("perplexity", module_type="metric") except Exception as e: _logger.warning( @@ -100,7 +95,6 @@ def _perplexity_eval_fn(predictions, targets, metrics): ) return - _logger.info("Computing perplexity metric:") scores = perplexity.compute(predictions=predictions, model_id="gpt2")["perplexities"] return MetricValue( scores=scores, @@ -118,7 +112,6 @@ def _flesch_kincaid_eval_fn(predictions, targets, metrics): _logger.warning("Failed to load flesch kincaid metric, skipping metric logging.") return - _logger.info("Computing flesch kincaid metric:") scores = [textstat.flesch_kincaid_grade(prediction) for prediction in predictions] return MetricValue( scores=scores, @@ -138,7 +131,6 @@ def _ari_eval_fn(predictions, targets, metrics): ) return - _logger.info("Computing automated readability index metric:") scores = [textstat.automated_readability_index(prediction) for prediction in predictions] return MetricValue( scores=scores, diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 35c55b034461d..a7a011958d8ad 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1151,6 +1151,7 @@ def _get_args_for_metrics(self, extra_metric, eval_df): input_df = 
self.X.copy_to_avoid_mutation() parameters = inspect.signature(extra_metric.eval_fn).parameters eval_fn_args = [] + params_not_found = [] if len(parameters) == 2: eval_fn_args.append(eval_df_copy) if "metrics" in parameters.keys(): @@ -1181,38 +1182,13 @@ def _get_args_for_metrics(self, extra_metric, eval_df): ): eval_fn_args.append(self.other_output_columns[column]) elif param.default == inspect.Parameter.empty: - output_column_name = self.predictions - if self.other_output_columns: - output_columns = list(self.other_output_columns.columns) - else: - output_columns = [] - input_columns = list(input_df.columns) - msg_output_columns = ( - ( - "Note that this does not include the output column: " - f"'{output_column_name}'\n\n" - ) - if output_column_name is not None - else "" - ) - raise MlflowException( - "Error: Metric Calculation Failed\n" - f"Metric '{extra_metric.name}' requires the column '{param_name}' to " - "be defined in either the input data or resulting output data.\n\n" - "Below are the existing column names for the input/output data:\n" - f"Input Columns: {input_columns}\n" - f"Output Columns: {output_columns}\n{msg_output_columns}" - f"To resolve this issue, you may want to map {param_name} to an " - "existing column using the following configuration:\n" - f"evaluator_config={{'col_mapping': {{'{param_name}': " - "''}}\n" - ) + params_not_found.append(param_name) + if len(params_not_found) > 0: + return extra_metric.name, params_not_found return eval_fn_args def _evaluate_extra_metrics(self, eval_df): - if not self.extra_metrics: - return for index, extra_metric in enumerate(self.extra_metrics): eval_fn_args = self._get_args_for_metrics(extra_metric, eval_df) _logger.info(f"Evaluating metrics: {extra_metric.name}") @@ -1438,9 +1414,81 @@ def _compute_builtin_metrics(self): ) ) + def _check_args(self, metrics, eval_df): + failed_metrics = [] + # collect all failures for getting metric arguments + for metric in metrics: + result = self._get_args_for_metrics(metric, eval_df) + if isinstance(result, tuple): + failed_metrics.append(result) + + if len(failed_metrics) > 0: + output_column_name = self.predictions + output_columns = ( + [] if self.other_output_columns is None else list(self.other_output_columns.columns) + ) + input_columns = list(self.X.copy_to_avoid_mutation().columns) + + error_messages = [] + for metric_name, param_names in failed_metrics: + error_messages.append(f"Metric '{metric_name}' requires the columns {param_names}") + error_message = "\n".join(error_messages) + raise MlflowException( + "Error: Metric calculation failed for the following metrics:\n" + f"{error_message}\n\n" + "Below are the existing column names for the input/output data:\n" + f"Input Columns: {input_columns}\n" + f"Output Columns: {output_columns}\n" + "Note that this does not include the output column: " + f"'{output_column_name}'\n\n" + f"To resolve this issue, you may want to map the missing column to an " + "existing column using the following configuration:\n" + f"evaluator_config={{'col_mapping': {{'': " + "''}}\n" + ) + + def _test_first_row(self, eval_df): + # test calculations on first row of eval_df + exceptions = [] + first_row_df = eval_df.iloc[[0]] + for metric in self.builtin_metrics: + try: + eval_fn_args = self._get_args_for_metrics(metric, first_row_df) + metric_value = metric.eval_fn(*eval_fn_args) + + # need to update metrics because they might be used in calculating extra_metrics + if metric_value: + name = f"{metric.name}/{metric.version}" if metric.version else 
metric.name + self.metrics_values.update({name: metric_value}) + except Exception as e: + if isinstance(e, MlflowException): + exceptions.append(f"Metric '{metric.name}': Error:\n{e.message}") + else: + exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}") + self._update_metrics() + for metric in self.extra_metrics: + try: + eval_fn_args = self._get_args_for_metrics(metric, first_row_df) + metric.eval_fn(*eval_fn_args) + except Exception as e: + if isinstance(e, MlflowException): + exceptions.append(f"Metric '{metric.name}': Error:\n{e.message}") + else: + exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}") + + if len(exceptions) > 0: + raise MlflowException("\n".join(exceptions)) + + def _evaluate_metrics(self, eval_df): + self._check_args(self.builtin_metrics + self.extra_metrics, eval_df) + self._test_first_row(eval_df) + + # calculate metrics for the full eval_df + self._evaluate_builtin_metrics(eval_df) + self._update_metrics() + self._evaluate_extra_metrics(eval_df) + def _evaluate_builtin_metrics(self, eval_df): - if not self.builtin_metrics: - return for builtin_metric in self.builtin_metrics: _logger.info(f"Evaluating builtin metrics: {builtin_metric.name}") @@ -1550,7 +1598,7 @@ def _evaluate( self.artifacts = {} self.metrics = {} self.metrics_values = {} - self.builtin_metrics = {} + self.builtin_metrics = [] text_metrics = [ token_count(), @@ -1562,15 +1610,14 @@ def _evaluate( with mlflow.utils.autologging_utils.disable_autologging(): compute_latency = False - if self.extra_metrics: - for extra_metric in self.extra_metrics: - # If latency metric is specified, we will compute latency for the model - # during prediction, and we will remove the metric from the list of extra - # metrics to be computed after prediction. - if extra_metric.name == _LATENCY_METRIC_NAME: - compute_latency = True - self.extra_metrics.remove(extra_metric) - break + for extra_metric in self.extra_metrics: + # If latency metric is specified, we will compute latency for the model + # during prediction, and we will remove the metric from the list of extra + # metrics to be computed after prediction. 
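The aggregated error can be reproduced with a tiny sketch: a metric that asks for a column which exists neither in the input nor in the output data is reported before any full-dataset computation starts. The metric and column names below are illustrative.

import pandas as pd

import mlflow
from mlflow.exceptions import MlflowException
from mlflow.metrics import make_metric


def needs_context(predictions, retrieved_context):
    # Never reached in this sketch: the column check fails first.
    return None


data = pd.DataFrame({"question": ["What is MLflow?"], "answer": ["An ML platform."]})

try:
    mlflow.evaluate(
        data=data,
        predictions="answer",
        model_type="text",
        extra_metrics=[make_metric(eval_fn=needs_context, greater_is_better=True)],
    )
except MlflowException as e:
    # "Error: Metric calculation failed for the following metrics:
    #  Metric 'needs_context' requires the columns ['retrieved_context']"
    print(e.message)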
+ if extra_metric.name == _LATENCY_METRIC_NAME: + compute_latency = True + self.extra_metrics.remove(extra_metric) + break self._generate_model_predictions(compute_latency=compute_latency) if self.model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR): self._compute_builtin_metrics() @@ -1594,9 +1641,7 @@ def _evaluate( if self.dataset.has_targets: eval_df["target"] = self.y - self._evaluate_builtin_metrics(eval_df) - self._update_metrics() - self._evaluate_extra_metrics(eval_df) + self._evaluate_metrics(eval_df) if not is_baseline_model: self._log_custom_artifacts(eval_df) @@ -1667,6 +1712,9 @@ def evaluate( else: self.extra_metrics = extra_metrics + if self.extra_metrics is None: + self.extra_metrics = [] + if self.model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR): inferred_model_type = _infer_model_type_by_labels(self.y) if inferred_model_type is not None and model_type != inferred_model_type: diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 95301616a8633..cc8659f030469 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2176,8 +2176,11 @@ def validate_question_answering_logged_data( assert logged_data["answer"].tolist() == ["words random", "This is a sentence."] -def test_custom_metrics_deprecated(): - def dummy_fn(eval_df, metrics): +def test_missing_args_raises_exception(): + def dummy_fn1(param_1, param_2, targets, metrics): + pass + + def dummy_fn2(param_3, param_4, builtin_metrics): pass with mlflow.start_run(): @@ -2186,17 +2189,48 @@ def dummy_fn(eval_df, metrics): ) data = pd.DataFrame({"question": ["a", "b"], "answer": ["a", "b"]}) + metric_1 = make_metric(name="metric_1", eval_fn=dummy_fn1, greater_is_better=True) + metric_2 = make_metric(name="metric_2", eval_fn=dummy_fn2, greater_is_better=True) + + error_message = "Error: Metric calculation failed for the following metrics:\nMetric 'metric_1'" + " requires the columns ['param_1', 'param_2']\n\nMetric 'metric_2' requires the columns " + "['param_3', 'builtin_metrics']\n" + with pytest.raises( MlflowException, - match="The 'custom_metrics' parameter in mlflow.evaluate is deprecated. Please update " - "your code to only use the 'extra_metrics' parameter instead.", + match=error_message, ): with mlflow.start_run(): mlflow.evaluate( model_info.model_uri, data, targets="answer", + evaluators="default", model_type="question-answering", + extra_metrics=[metric_1, metric_2], + evaluator_config={"col_mapping": {"param_4": "question"}}, + ) + + +def test_custom_metrics_deprecated( + binary_logistic_regressor_model_uri, + breast_cancer_dataset, +): + def dummy_fn(eval_df, metrics): + pass + + with pytest.raises( + MlflowException, + match="The 'custom_metrics' parameter in mlflow.evaluate is deprecated. 
Please update " + "your code to only use the 'extra_metrics' parameter instead.", + ): + with mlflow.start_run(): + mlflow.evaluate( + binary_logistic_regressor_model_uri, + breast_cancer_dataset._constructor_args["data"], + targets=breast_cancer_dataset._constructor_args["targets"], + evaluators="default", + model_type="classifier", custom_metrics=[make_metric(eval_fn=dummy_fn, greater_is_better=True)], extra_metrics=[make_metric(eval_fn=dummy_fn, greater_is_better=True)], ) @@ -2206,10 +2240,11 @@ def dummy_fn(eval_df, metrics): with pytest.warns(FutureWarning, match=message): with mlflow.start_run(): mlflow.evaluate( - model_info.model_uri, - data, - targets="answer", - model_type="question-answering", + binary_logistic_regressor_model_uri, + breast_cancer_dataset._constructor_args["data"], + targets=breast_cancer_dataset._constructor_args["targets"], + evaluators="default", + model_type="classifier", custom_metrics=[make_metric(eval_fn=dummy_fn, greater_is_better=True)], ) @@ -2480,6 +2515,10 @@ def test_evaluate_text_summarization_without_targets(): def test_evaluate_text_summarization_fails_to_load_evaluate_metrics(): + from mlflow.metrics.metric_definitions import _cached_evaluate_load + + _cached_evaluate_load.cache_clear() + with mlflow.start_run() as run: model_info = mlflow.pyfunc.log_model( artifact_path="model", python_model=language_model, input_example=["a", "b"] @@ -2890,10 +2929,6 @@ def test_default_metrics_as_custom_metrics(): predictions="answer", model_type="question-answering", custom_metrics=[ - mlflow.metrics.flesch_kincaid_grade_level(), - mlflow.metrics.perplexity(), - mlflow.metrics.ari_grade_level(), - mlflow.metrics.toxicity(), mlflow.metrics.exact_match(), ], evaluators="default", @@ -2902,9 +2937,6 @@ def test_default_metrics_as_custom_metrics(): client = mlflow.MlflowClient() artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] assert "eval_results_table.json" in artifacts - for metric in ["toxicity", "perplexity", "ari_grade_level", "flesch_kincaid_grade_level"]: - for measure in ["mean", "p90", "variance"]: - assert f"{metric}/v1/{measure}" in results.metrics.keys() assert "exact_match/v1" in results.metrics.keys() diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index a61e556db2cd4..98a50c6c0df8b 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -226,7 +226,6 @@ def test_make_genai_metric_correct_response(): model="openai:/gpt-3.5-turbo", grading_context_columns=["targets"], greater_is_better=True, - aggregations=None, ) with mock.patch.object( model_utils, @@ -303,6 +302,22 @@ def test_make_genai_metric_incorrect_response(): assert metric_value.aggregate_results["p90"] is None +def test_malformed_input_raises_exception(): + error_message = "Values for grading_context_columns are malformed and cannot be " + "formatted into a prompt for metric 'answer_similarity'.\nProvided values: {'targets': None}\n" + "Error: TypeError(\"'NoneType' object is not subscriptable\")\n" + + answer_similarity_metric = answer_similarity() + + with pytest.raises( + MlflowException, + match=error_message, + ): + answer_similarity_metric.eval_fn( + pd.Series([mlflow_prediction]), {}, pd.Series([input]), None + ) + + def test_make_genai_metric_multiple(): custom_metric = make_genai_metric( name="correctness", diff --git a/tests/metrics/test_metric_definitions.py b/tests/metrics/test_metric_definitions.py index a1712ed3b801b..5b63b5299f8da 100644 --- 
a/tests/metrics/test_metric_definitions.py +++ b/tests/metrics/test_metric_definitions.py @@ -172,6 +172,10 @@ def test_rougeLsum(): def test_fails_to_load_metric(): + from mlflow.metrics.metric_definitions import _cached_evaluate_load + + _cached_evaluate_load.cache_clear() + predictions = pd.Series(["random text", "This is a sentence"]) e = ImportError("mocked error") with mock.patch( From 95d9217c2879f9046a3ed0ce37d7d7e4f17b9cff Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Thu, 19 Oct 2023 22:00:07 -0700 Subject: [PATCH 033/101] Cleaning up custom metrics from docs strings (#10016) Signed-off-by: Sunish Sheth --- docs/source/models.rst | 4 ++-- examples/evaluation/README.md | 4 ++-- mlflow/models/evaluation/base.py | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/models.rst b/docs/source/models.rst index d94dc8f6bbae8..d3d7897daebf3 100644 --- a/docs/source/models.rst +++ b/docs/source/models.rst @@ -3644,10 +3644,10 @@ each model: For additional examples demonstrating the use of ``mlflow.evaluate()`` with LLMs, check out the `MLflow LLMs example repository `_. -Evaluating with Custom Metrics +Evaluating with Extra Metrics ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If the default set of metrics is insufficient, you can supply ``custom_metrics`` and ``custom_artifacts`` +If the default set of metrics is insufficient, you can supply ``extra_metrics`` and ``custom_artifacts`` to :py:func:`mlflow.evaluate()` to produce custom metrics and artifacts for the model(s) that you're evaluating. The following `short example from the MLflow GitHub Repository `_ diff --git a/examples/evaluation/README.md b/examples/evaluation/README.md index 8dcd8fe7a6f50..eaec69df35b88 100644 --- a/examples/evaluation/README.md +++ b/examples/evaluation/README.md @@ -2,7 +2,7 @@ The examples in this directory demonstrate how to use the `mlflow.evaluate()` API. Specifically, they show how to evaluate a PyFunc model on a specified dataset using the builtin default evaluator -and specified custom metrics, where the resulting metrics & artifacts are logged to MLflow Tracking. +and specified extra metrics, where the resulting metrics & artifacts are logged to MLflow Tracking. They also show how to specify validation thresholds for the resulting metrics to validate the quality of your model. See full list of examples below: @@ -18,7 +18,7 @@ of your model. See full list of examples below: with a comprehensive list of custom metric functions on dataset loaded by `sklearn.datasets.fetch_california_housing` - Example `evaluate_with_model_validation.py` trains both a candidate xgboost `XGBClassifier` model and a baseline `DummyClassifier` model on dataset loaded by `shap.datasets.adult`. Then, it validates - the candidate model against specified thresholds on both builtin and custom metrics and the dummy model. + the candidate model against specified thresholds on both builtin and extra metrics and the dummy model. #### Prerequisites diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 9a32d2af0b4d7..7f9a92d8c6d7f 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1512,10 +1512,10 @@ def model(inputs): :param extra_metrics: (Optional) A list of :py:class:`EvaluationMetric ` objects. See the `mlflow.metrics` module for more information about the - builtin metrics and how to define custom metrics + builtin metrics and how to define extra metrics .. 
code-block:: python - :caption: Example usage of custom metrics + :caption: Example usage of extra metrics import mlflow import numpy as np From 506e6432d4fdf2054a113ad98e7f33aca5848cb1 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Fri, 20 Oct 2023 17:45:38 +0900 Subject: [PATCH 034/101] Set `MLFLOW_HOME` using `env` (#10022) Signed-off-by: harupy --- .github/workflows/cross-version-tests.yml | 2 +- .github/workflows/examples.yml | 2 +- .github/workflows/master.yml | 4 +--- .github/workflows/r.yml | 2 +- .github/workflows/recipe.yml | 3 +-- dev/dev-env-setup.sh | 6 +++--- dev/install-common-deps.sh | 2 -- dev/run-python-flavor-tests.sh | 2 -- dev/run-python-sagemaker-tests.sh | 2 -- dev/run-python-tests.sh | 1 - dev/test-dev-env-setup.sh | 8 ++++---- 11 files changed, 12 insertions(+), 22 deletions(-) diff --git a/.github/workflows/cross-version-tests.yml b/.github/workflows/cross-version-tests.yml index 860df0a01e017..69e17b4b68e60 100644 --- a/.github/workflows/cross-version-tests.yml +++ b/.github/workflows/cross-version-tests.yml @@ -41,6 +41,7 @@ defaults: shell: bash --noprofile --norc -exo pipefail {0} env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu jobs: @@ -197,5 +198,4 @@ jobs: PACKAGE_VERSION: ${{ matrix.version }} JOHNSNOWLABS_LICENSE_JSON: ${{ secrets.JOHNSNOWLABS_LICENSE_JSON }} run: | - export MLFLOW_HOME=$(pwd) ${{ matrix.run }} diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index abf92c5343cd4..f3f7926abe1d1 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -33,6 +33,7 @@ defaults: shell: bash --noprofile --norc -exo pipefail {0} env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow MLFLOW_CONDA_HOME: /usr/share/miniconda jobs: @@ -74,7 +75,6 @@ jobs: env: SPARK_LOCAL_IP: localhost run: | - export MLFLOW_HOME=$(pwd) pytest tests/examples --durations=30 - name: Remove conda environments diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 22e09d7bbd6a3..7219d6c8bce83 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -21,6 +21,7 @@ defaults: shell: bash --noprofile --norc -exo pipefail {0} env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow # Note miniconda is pre-installed in the virtual environments for GitHub Actions: # https://github.com/actions/virtual-environments/blob/main/images/linux/scripts/installers/miniconda.sh MLFLOW_CONDA_HOME: /usr/share/miniconda @@ -251,7 +252,6 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - export MLFLOW_HOME=$(pwd) pytest tests/models # NOTE: numpy is pinned in this suite due to its heavy reliance on shap, which internally uses @@ -277,7 +277,6 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - export MLFLOW_HOME=$(pwd) dev/pytest.sh tests/evaluate pyfunc: @@ -300,7 +299,6 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - export MLFLOW_HOME=$(pwd) pytest --durations=30 tests/pyfunc --ignore tests/pyfunc/test_spark_connect.py # test_spark_connect.py fails if it's run with ohter tests, so run it separately. 
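The hunks in this commit all make the same change: the per-step `export MLFLOW_HOME=$(pwd)` lines are removed and the variable is declared once in a workflow-level `env:` block, so every job and step inherits it. A minimal sketch of the resulting shape is shown below, assuming the GitHub-hosted runner checkout path used by these workflows; the job name, step name, and checkout action version are illustrative and not taken from the patched files themselves:

    # Sketch only: one workflow-level env entry replaces repeated exports in run steps.
    env:
      MLFLOW_HOME: /home/runner/work/mlflow/mlflow

    jobs:
      python-tests:              # illustrative job name
        runs-on: ubuntu-latest
        steps:
          - uses: actions/checkout@v3
          - name: Run tests
            run: |
              # MLFLOW_HOME is already present in the environment of every step,
              # so no `export MLFLOW_HOME=$(pwd)` is needed before invoking pytest.
              pytest tests/models
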
diff --git a/.github/workflows/r.yml b/.github/workflows/r.yml index db9bc24e62397..0e629a0773cbb 100644 --- a/.github/workflows/r.yml +++ b/.github/workflows/r.yml @@ -18,6 +18,7 @@ concurrency: cancel-in-progress: true env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION: python jobs: @@ -90,5 +91,4 @@ jobs: LINTR_COMMENT_BOT: false run: | cd tests - export MLFLOW_HOME=$(git rev-parse --show-toplevel) Rscript -e 'source("../.run-tests.R", echo=TRUE)' diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index 865c1a945e33c..84033b04c24e2 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -21,6 +21,7 @@ defaults: shell: bash --noprofile --norc -exo pipefail {0} env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow # Note miniconda is pre-installed in the virtual environments for GitHub Actions: # https://github.com/actions/virtual-environments/blob/main/images/linux/scripts/installers/miniconda.sh MLFLOW_CONDA_HOME: /usr/share/miniconda @@ -46,7 +47,6 @@ jobs: pip install 'pyspark<3.5' - name: Run tests run: | - export MLFLOW_HOME=$(pwd) pytest tests/recipes recipes-windows: @@ -81,6 +81,5 @@ jobs: # Set Hadoop environment variables required for testing Spark integrations on Windows export HADOOP_HOME=/tmp/winutils/hadoop-3.2.2 export PATH=$PATH:$HADOOP_HOME/bin - # Run recipes tests export MLFLOW_HOME=$(pwd) pytest tests/recipes diff --git a/dev/dev-env-setup.sh b/dev/dev-env-setup.sh index 6efd841e58537..fbb2616934faf 100755 --- a/dev/dev-env-setup.sh +++ b/dev/dev-env-setup.sh @@ -21,7 +21,7 @@ This script will: Example usage: - From root of MLflow repository on local with a destination virtualenv path of /.venvs/mlflow-dev: + From root of MLflow repository on local with a destination virtualenv path of /.venvs/mlflow-dev: dev/dev-env-setup.sh -d $(pwd)/.venvs/mlflow-dev @@ -151,8 +151,8 @@ if [ -z "$pyenv_exist" ]; then fi fi -MLFLOW_HOME=$(pwd) -rd="$MLFLOW_HOME/requirements" +REPO_ROOT=$(git rev-parse --show-toplevel) +rd="$REPO_ROOT/requirements" # Get the minimum supported version for development purposes min_py_version="3.8" diff --git a/dev/install-common-deps.sh b/dev/install-common-deps.sh index 2134f8dad20ac..052deb10a16af 100755 --- a/dev/install-common-deps.sh +++ b/dev/install-common-deps.sh @@ -52,7 +52,6 @@ if [[ "$SKINNY" == "true" ]]; then else pip install .[extras] --upgrade fi -export MLFLOW_HOME=$(pwd) req_files="" # Install Python test dependencies only if we're running Python tests @@ -77,7 +76,6 @@ pip install --no-dependencies tests/resources/mlflow-test-plugin pip install aiohttp python dev/show_package_release_dates.py which mlflow -echo $MLFLOW_HOME # Print mlflow version mlflow --version diff --git a/dev/run-python-flavor-tests.sh b/dev/run-python-flavor-tests.sh index b21e3e3a342f3..5a18a435c8491 100755 --- a/dev/run-python-flavor-tests.sh +++ b/dev/run-python-flavor-tests.sh @@ -1,8 +1,6 @@ #!/usr/bin/env bash set -x -export MLFLOW_HOME=$(pwd) - pytest \ tests/utils/test_model_utils.py \ tests/tracking/fluent/test_fluent_autolog.py \ diff --git a/dev/run-python-sagemaker-tests.sh b/dev/run-python-sagemaker-tests.sh index f7b0cc5c3eeda..b55f098cc0da2 100755 --- a/dev/run-python-sagemaker-tests.sh +++ b/dev/run-python-sagemaker-tests.sh @@ -1,6 +1,4 @@ #!/usr/bin/env bash set -ex -export MLFLOW_HOME=$(pwd) - pytest tests/sagemaker diff --git a/dev/run-python-tests.sh b/dev/run-python-tests.sh index 807ee7c777d42..a2b7a947104f9 100755 --- a/dev/run-python-tests.sh +++ 
b/dev/run-python-tests.sh @@ -4,7 +4,6 @@ set -x # https://stackoverflow.com/a/42219754 err=0 trap 'err=1' ERR -export MLFLOW_HOME=$(pwd) pytest tests --quiet --requires-ssh --ignore-flavors --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate diff --git a/dev/test-dev-env-setup.sh b/dev/test-dev-env-setup.sh index 623c089de13fd..bb688c000a017 100755 --- a/dev/test-dev-env-setup.sh +++ b/dev/test-dev-env-setup.sh @@ -15,13 +15,13 @@ set -x err=0 -MLFLOW_HOME=$(pwd) -export MLFLOW_HOME +REPO_ROOT=$(git rev-parse --show-toplevel) +export REPO_ROOT # Run the installation of the environment -DEV_DIR=$MLFLOW_HOME/.venvs/mlflow-dev +DEV_DIR=$REPO_ROOT/.venvs/mlflow-dev -"$MLFLOW_HOME"/dev/dev-env-setup.sh -d "$DEV_DIR" -f +"$REPO_ROOT"/dev/dev-env-setup.sh -d "$DEV_DIR" -f source "$DEV_DIR/bin/activate" From 5fd619603cf17673ade4075afdb18344bb0e66f2 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Fri, 20 Oct 2023 17:50:44 +0900 Subject: [PATCH 035/101] Ask where the bug occurred in the bug template (#10028) Signed-off-by: harupy --- .github/ISSUE_TEMPLATE/bug_report_template.yaml | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/.github/ISSUE_TEMPLATE/bug_report_template.yaml b/.github/ISSUE_TEMPLATE/bug_report_template.yaml index 4b0c0482eb260..ac1045ce4847e 100644 --- a/.github/ISSUE_TEMPLATE/bug_report_template.yaml +++ b/.github/ISSUE_TEMPLATE/bug_report_template.yaml @@ -17,6 +17,16 @@ body: options: - label: I have read and agree to submit bug reports in accordance with the [issues policy](https://www.github.com/mlflow/mlflow/blob/master/ISSUE_POLICY.md) required: true + - type: dropdown + attributes: + label: Where did you encounter this bug? + options: + - Local machine + - Databricks + - Azure Machine Learning + - Other + validations: + required: true - type: dropdown id: contribution attributes: From 54d463091fa12b641dec26f59d3bbdba78e059ac Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Fri, 20 Oct 2023 18:57:57 +0900 Subject: [PATCH 036/101] Remvoe useless test runner scripts (#10030) Signed-off-by: harupy --- .github/workflows/master.yml | 11 ++++++++--- dev/run-python-flavor-tests.sh | 8 -------- dev/run-python-sagemaker-tests.sh | 4 ---- dev/run-python-tests.sh | 10 ---------- 4 files changed, 8 insertions(+), 25 deletions(-) delete mode 100755 dev/run-python-flavor-tests.sh delete mode 100755 dev/run-python-sagemaker-tests.sh delete mode 100755 dev/run-python-tests.sh diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 7219d6c8bce83..c4cceb991197e 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -114,7 +114,8 @@ jobs: run: | . ~/.venv/bin/activate source dev/setup-ssh.sh - ./dev/run-python-tests.sh + pytest tests --quiet --requires-ssh --ignore-flavors \ + --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate database: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false @@ -228,7 +229,11 @@ jobs: - name: Run tests run: | . ~/.venv/bin/activate - ./dev/run-python-flavor-tests.sh; + pytest \ + tests/utils/test_model_utils.py \ + tests/tracking/fluent/test_fluent_autolog.py \ + tests/autologging \ + tests/server/auth # It takes 9 ~ 10 minutes to run tests in `tests/models`. To make CI finish faster, # run these tests in a separate job. 
@@ -321,7 +326,7 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - ./dev/run-python-sagemaker-tests.sh; + pytest tests/sagemaker windows: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false diff --git a/dev/run-python-flavor-tests.sh b/dev/run-python-flavor-tests.sh deleted file mode 100755 index 5a18a435c8491..0000000000000 --- a/dev/run-python-flavor-tests.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env bash -set -x - -pytest \ - tests/utils/test_model_utils.py \ - tests/tracking/fluent/test_fluent_autolog.py \ - tests/autologging \ - tests/server/auth diff --git a/dev/run-python-sagemaker-tests.sh b/dev/run-python-sagemaker-tests.sh deleted file mode 100755 index b55f098cc0da2..0000000000000 --- a/dev/run-python-sagemaker-tests.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/usr/bin/env bash -set -ex - -pytest tests/sagemaker diff --git a/dev/run-python-tests.sh b/dev/run-python-tests.sh deleted file mode 100755 index a2b7a947104f9..0000000000000 --- a/dev/run-python-tests.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/env bash -set -x -# Set err=1 if any commands exit with non-zero status as described in -# https://stackoverflow.com/a/42219754 -err=0 -trap 'err=1' ERR - -pytest tests --quiet --requires-ssh --ignore-flavors --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate - -test $err = 0 From 209378ba152e3cf62bbe6b66b26cd327d4c15e36 Mon Sep 17 00:00:00 2001 From: Anushka Bhowmick <76967222+Anushka-Bhowmick@users.noreply.github.com> Date: Fri, 20 Oct 2023 16:49:40 +0530 Subject: [PATCH 037/101] update .gitignore file (#10033) Signed-off-by: Anushka Bhowmick <76967222+Anushka-Bhowmick@users.noreply.github.com> --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index f448ebaf4f360..633f40b2f37ad 100644 --- a/.gitignore +++ b/.gitignore @@ -104,6 +104,7 @@ travis_wait*.log lightning_logs a.py +a.ipynb a.md # Log file created by pre-commit hook for black From 424ebcb352519732003051ce8333dae85be23f3a Mon Sep 17 00:00:00 2001 From: Serena Ruan <82044803+serena-ruan@users.noreply.github.com> Date: Fri, 20 Oct 2023 23:43:36 +0800 Subject: [PATCH 038/101] fix fill-mask with multiple masks (#10032) Signed-off-by: Serena Ruan --- mlflow/transformers/__init__.py | 10 +++++++++- tests/transformers/test_transformers_model_export.py | 11 +++++++++++ 2 files changed, 20 insertions(+), 1 deletion(-) diff --git a/mlflow/transformers/__init__.py b/mlflow/transformers/__init__.py index 3433a65ad5d9c..015196a54d2f5 100644 --- a/mlflow/transformers/__init__.py +++ b/mlflow/transformers/__init__.py @@ -2316,8 +2316,16 @@ def _parse_list_of_multiple_dicts(output_data, target_dict_key): Returns the first value of the `target_dict_key` that matches in the first dictionary in a list of dictionaries. 
""" + + def fetch_target_key_value(data, key): + if isinstance(data[0], dict): + return data[0][key] + return [item[0][key] for item in data] + if isinstance(output_data[0], list): - return [collection[0][target_dict_key] for collection in output_data] + return [ + fetch_target_key_value(collection, target_dict_key) for collection in output_data + ] else: return [output_data[0][target_dict_key]] diff --git a/tests/transformers/test_transformers_model_export.py b/tests/transformers/test_transformers_model_export.py index cca0f080e9e09..6d2df7369f326 100644 --- a/tests/transformers/test_transformers_model_export.py +++ b/tests/transformers/test_transformers_model_export.py @@ -1644,6 +1644,17 @@ def test_fill_mask_pipeline(fill_mask_pipeline, model_path, inference_payload, r assert pd_inference == result +def test_fill_mask_pipeline_with_multiple_masks(fill_mask_pipeline, model_path): + data = ["I the whole of ", "I the whole of "] + + mlflow.transformers.save_model(fill_mask_pipeline, path=model_path) + pyfunc_loaded = mlflow.pyfunc.load_model(model_path) + + inference = pyfunc_loaded.predict(data) + assert len(inference) == 2 + assert all(len(value) == 3 for value in inference) + + @pytest.mark.parametrize( "invalid_data", [ From 299d2f025795f101769daf7390db26b59638e00c Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Fri, 20 Oct 2023 12:09:14 -0700 Subject: [PATCH 039/101] Add example notebooks for QA (#10018) --- .../LLM Evaluation Examples -- QA.ipynb | 1755 +++++++++++++++++ 1 file changed, 1755 insertions(+) create mode 100644 examples/evaluation/LLM Evaluation Examples -- QA.ipynb diff --git a/examples/evaluation/LLM Evaluation Examples -- QA.ipynb b/examples/evaluation/LLM Evaluation Examples -- QA.ipynb new file mode 100644 index 0000000000000..a3dfcef0454cb --- /dev/null +++ b/examples/evaluation/LLM Evaluation Examples -- QA.ipynb @@ -0,0 +1,1755 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "0a87a4cd-8a01-4e35-8a71-eaf91ed4ddd2", + "showTitle": false, + "title": "" + } + }, + "source": [ + "# LLM Evaluation with MLflow Example Notebook\n", + "\n", + "In this notebook, we will demonstrate how to evaluate various LLMs and RAG systems with MLflow, leveraging simple metrics such as perplexity and toxicity, as well as LLM-judged metrics such as relevance, and even custom LLM-judged metrics such as professionalism" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "cce6412a-2279-4ec1-a344-fa76fec70ee1", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Set OpenAI Key" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "fb946228-62fb-4d68-9732-75935c9cb401", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "import os" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "bec25067-224d-4ee8-9b5d-0beeb6cde684", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "os.environ[\"OPENAI_API_KEY\"] = \"redacted\"" + ] + }, + { 
+ "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import openai\n", + "import pandas as pd\n", + "\n", + "import mlflow" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "a9bbfc03-793e-4b95-b009-ef30dccd7e7d", + "showTitle": false, + "title": "" + } + }, + "source": [ + "## Basic Question-Answering Evaluation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "ff253b9e-59e8-40e0-92d8-8f9ef85348fd", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create a test case of `inputs` that will be passed into the model and `ground_truth` which will be used to compare against the generated output from the model." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "6199fb3f-5951-42fe-891a-2227010b630a", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "eval_df = pd.DataFrame(\n", + " {\n", + " \"inputs\": [\n", + " \"How does useEffect() work?\",\n", + " \"What does the static keyword in a function mean?\",\n", + " \"What does the 'finally' block in Python do?\",\n", + " \"What is the difference between multiprocessing and multithreading?\",\n", + " ],\n", + " \"ground_truth\": [\n", + " \"The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.\",\n", + " \"Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.\",\n", + " \"'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.\",\n", + " \"Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.\",\n", + " ],\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "06825224-49bd-452d-8dab-b11ca8130017", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create a simple OpenAI model that asks gpt-3.5 to answer the question in two sentences. Call `mlflow.evaluate()` with the model and evaluation dataframe. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "7b67eb6f-c91a-4f9a-ac0d-01fd22b087c8", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023/10/19 22:35:53 WARNING mlflow.models.model: Model logged without a signature. 
Signatures will be required for upcoming model registry features as they validate model inputs and denote the expected schema of model outputs. Please visit https://www.mlflow.org/docs/2.7.2/models.html#set-signature-on-logged-model for instructions on setting a model signature on your logged model.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d4bf4e330c1541819217be2deee3dd2b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Uploading artifacts: 0%| | 0/5 [00:00\n", + " .table-result-container {\n", + " max-height: 300px;\n", + " overflow: auto;\n", + " }\n", + " table, th, td {\n", + " border: 1px solid black;\n", + " border-collapse: collapse;\n", + " }\n", + " th, td {\n", + " padding: 5px;\n", + " }\n", + " th {\n", + " text-align: left;\n", + " }\n", + "
inputsground_truthoutputstoken_countperplexity/v1/scoreflesch_kincaid_grade_level/v1/scoreari_grade_level/v1/score
How does useEffect() work?The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.useEffect() is a hook in React that allows you to perform side effects in functional components. It takes a function as its first argument and runs that function after the component has rendered and whenever any of its dependencies have changed.4523.479785919211.312.4
What does the static keyword in a function mean?Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.The static keyword in a function means that the function can only be accessed within the same file it is declared in, and is not accessible to other files. It is used to limit the scope of the function and improve encapsulation.4618.2027149210.310.0
What does the 'finally' block in Python do?'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.The 'finally' block in Python is used to define a block of code that will be executed regardless of whether an exception has occurred or not, ensuring that certain clean-up actions are always performed. It is typically used to close files, release resources, or clean up any operations that need to be done before exiting a try-except block.6823.30614280713.416.1
What is the difference between multiprocessing and multithreading?Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.Multiprocessing involves the simultaneous execution of multiple tasks on multiple processors, while multithreading involves the execution of multiple threads within a single process, allowing for concurrent execution of different parts of the program.3912.881818771423.226.3
" + ] + }, + "metadata": { + "application/vnd.databricks.v1+output": { + "addedWidgets": {}, + "aggData": [], + "aggError": "", + "aggOverflow": false, + "aggSchema": [], + "aggSeriesLimitReached": false, + "aggType": "", + "arguments": {}, + "columnCustomDisplayInfos": {}, + "data": [ + [ + "How does useEffect() work?", + "The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.", + "useEffect() is a hook in React that allows you to perform side effects in functional components. It takes a function as its first argument and runs that function after the component has rendered and whenever any of its dependencies have changed.", + 45, + 23.4797859192, + 11.3, + 12.4 + ], + [ + "What does the static keyword in a function mean?", + "Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.", + "The static keyword in a function means that the function can only be accessed within the same file it is declared in, and is not accessible to other files. It is used to limit the scope of the function and improve encapsulation.", + 46, + 18.20271492, + 10.3, + 10 + ], + [ + "What does the 'finally' block in Python do?", + "'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.", + "The 'finally' block in Python is used to define a block of code that will be executed regardless of whether an exception has occurred or not, ensuring that certain clean-up actions are always performed. It is typically used to close files, release resources, or clean up any operations that need to be done before exiting a try-except block.", + 68, + 23.306142807, + 13.4, + 16.1 + ], + [ + "What is the difference between multiprocessing and multithreading?", + "Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. 
Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.", + "Multiprocessing involves the simultaneous execution of multiple tasks on multiple processors, while multithreading involves the execution of multiple threads within a single process, allowing for concurrent execution of different parts of the program.", + 39, + 12.8818187714, + 23.2, + 26.3 + ] + ], + "datasetInfos": [], + "dbfsResultPath": null, + "isJsonSchema": true, + "metadata": {}, + "overflow": false, + "plotOptions": { + "customPlotOptions": {}, + "displayType": "table", + "pivotAggregation": null, + "pivotColumns": null, + "xColumns": null, + "yColumns": null + }, + "removedWidgets": [], + "schema": [ + { + "metadata": "{}", + "name": "inputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "ground_truth", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "outputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "token_count", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "perplexity/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "flesch_kincaid_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "ari_grade_level/v1/score", + "type": "\"double\"" + } + ], + "type": "table" + } + }, + "output_type": "display_data" + } + ], + "source": [ + "results.tables[\"eval_results_table\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "1a7363c9-3b73-4e3f-bf7c-1d6887fb4f9e", + "showTitle": false, + "title": "" + } + }, + "source": [ + "## LLM-judged correctness with OpenAI GPT-4" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "cd23fe79-cfbf-42a7-a3f3-14badfe20db5", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Construct an answer similarity metric using the `answer_similarity()` metric factory function." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "88b35b52-5b8f-4b72-9de8-fec05f01e722", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EvaluationMetric(name=answer_similarity, greater_is_better=True, long_name=answer_similarity, version=v1, metric_details=\n", + "Task:\n", + "You are an impartial judge. You will be given an input that was sent to a machine\n", + "learning model, and you will be given an output that the model produced. You\n", + "may also be given additional information that was used by the model to generate the output.\n", + "\n", + "Your task is to determine a numerical score called answer_similarity based on the input and output.\n", + "A definition of answer_similarity and a grading rubric are provided below.\n", + "You must use the grading rubric to determine your score. You must also justify your score.\n", + "\n", + "Examples could be included below for reference. 
Make sure to use them as references and to\n", + "understand them before completing the task.\n", + "\n", + "Input:\n", + "{input}\n", + "\n", + "Output:\n", + "{output}\n", + "\n", + "{grading_context_columns}\n", + "\n", + "Metric definition:\n", + "Answer similarity is evaluated on the degree of semantic similarity of the provided output to the provided targets, which is the ground truth. Scores can be assigned based on the gradual similarity in meaning and description to the provided targets, where a higher score indicates greater alignment between the provided output and provided targets.\n", + "\n", + "Grading rubric:\n", + "Answer similarity: Below are the details for different scores:\n", + "- Score 1: the output has little to no semantic similarity to the provided targets.\n", + "- Score 2: the output displays partial semantic similarity to the provided targets on some aspects.\n", + "- Score 3: the output has moderate semantic similarity to the provided targets.\n", + "- Score 4: the output aligns with the provided targets in most aspects and has substantial semantic similarity.\n", + "- Score 5: the output closely aligns with the provided targets in all significant aspects.\n", + "\n", + "Examples:\n", + "\n", + "Input:\n", + "What is MLflow?\n", + "\n", + "Output:\n", + "MLflow is an open-source platform for managing machine learning workflows, including experiment tracking, model packaging, versioning, and deployment, simplifying the ML lifecycle.\n", + "\n", + "Additional information used by the model:\n", + "key: ground_truth\n", + "value:\n", + "MLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.\n", + "\n", + "score: 4\n", + "justification: The definition effectively explains what MLflow is its purpose, and its developer. It could be more concise for a 5-score.\n", + " \n", + "\n", + "You must return the following fields in your response one below the other:\n", + "score: Your numerical score for the model's answer_similarity based on the rubric\n", + "justification: Your step-by-step reasoning about the model's answer_similarity score\n", + " )\n" + ] + } + ], + "source": [ + "from mlflow.metrics import EvaluationExample, answer_similarity\n", + "\n", + "# Create an example to describe what answer_similarity means like for this problem.\n", + "example = EvaluationExample(\n", + " input=\"What is MLflow?\",\n", + " output=\"MLflow is an open-source platform for managing machine \"\n", + " \"learning workflows, including experiment tracking, model packaging, \"\n", + " \"versioning, and deployment, simplifying the ML lifecycle.\",\n", + " score=4,\n", + " justification=\"The definition effectively explains what MLflow is \"\n", + " \"its purpose, and its developer. It could be more concise for a 5-score.\",\n", + " grading_context={\n", + " \"ground_truth\": \"MLflow is an open-source platform for managing \"\n", + " \"the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, \"\n", + " \"a company that specializes in big data and machine learning solutions. 
MLflow is \"\n", + " \"designed to address the challenges that data scientists and machine learning \"\n", + " \"engineers face when developing, training, and deploying machine learning models.\"\n", + " },\n", + ")\n", + "\n", + "# Construct the metric using OpenAI GPT-4 as the judge\n", + "answer_similarity_metric = answer_similarity(model=\"openai:/gpt-4\", examples=[example])\n", + "\n", + "print(answer_similarity_metric)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d627f7ab-a7e1-430d-9431-9ce4bd810fa7", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Call `mlflow.evaluate()` again but with your new `answer_similarity_metric`" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "cae9d80b-39a2-4e98-ac08-bfa5ba387b8f", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "4a9ad395386743a0a44cce1875382e27", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading artifacts: 0%| | 0/5 [00:00\n", + " .table-result-container {\n", + " max-height: 300px;\n", + " overflow: auto;\n", + " }\n", + " table, th, td {\n", + " border: 1px solid black;\n", + " border-collapse: collapse;\n", + " }\n", + " th, td {\n", + " padding: 5px;\n", + " }\n", + " th {\n", + " text-align: left;\n", + " }\n", + "
inputsground_truthoutputstoken_countperplexity/v1/scoreflesch_kincaid_grade_level/v1/scoreari_grade_level/v1/scoreanswer_similarity/v1/scoreanswer_similarity/v1/justification
How does useEffect() work?The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.useEffect() is a function in React that allows you to perform side effects in a functional component. It takes two arguments: a callback function that will be run after the component renders, and an array of dependencies to determine when the callback should be invoked.5115.581936836212.113.54The output provided by the model aligns well with the additional information provided. Both the output and the additional information explain that useEffect() is a function in React that performs actions after the component renders. The output also mentions the two arguments that useEffect() takes, which is not mentioned in the additional information. However, the output does not mention that React will remember the function passed to useEffect() and call it after performing the DOM updates, which is mentioned in the additional information. Therefore, while the output is largely accurate and aligns with the additional information in most aspects, it does not align in all significant aspects, which is why it receives a score of 4.
What does the static keyword in a function mean?Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.In C++, the static keyword used in a function declaration means that the function is limited to the scope of the translation unit and cannot be accessed by other files. In C#, the static keyword used in a function declaration means that the function belongs to the class itself and not to any specific instance of the class.6210.34881877912.214.74The output provided by the model aligns well with the target information. It correctly explains the meaning of the static keyword in a function in both C++ and C#. However, it does not explicitly mention that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any, which is mentioned in the target information. Therefore, while the output is largely accurate and informative, it lacks some details present in the target information, which prevents it from achieving a perfect score.
What does the 'finally' block in Python do?'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.The 'finally' block in Python is used to specify code that will always be executed, whether or not an exception is raised in the try block. This block is typically used for clean-up actions, such as closing files or releasing resources, to ensure they are always performed, regardless of any exceptions.6026.538370132412.114.75The output provided by the model aligns very closely with the provided targets. Both the output and the targets explain that the 'finally' block in Python is used to specify code that will always be executed, regardless of whether an exception is raised in the try block. The output also adds that this block is typically used for clean-up actions, which is an important aspect of the 'finally' block's functionality. Therefore, the output has a high degree of semantic similarity to the provided targets, warranting a score of 5.
What is the difference between multiprocessing and multithreading?Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.Multiprocessing is the use of multiple processors or cores to execute multiple tasks simultaneously, while multithreading is the use of multiple threads within a single process to execute different parts of the code concurrently. In simple terms, multiprocessing involves running multiple processes, while multithreading involves running multiple threads within a single process.626.771638393418.221.34The output provided by the model aligns closely with the provided targets. Both the output and the targets explain the difference between multiprocessing and multithreading, and they both mention that multiprocessing involves multiple processors or cores, while multithreading involves multiple threads within a single process. However, the output does not mention that each processor in multiprocessing can run one or more threads, which is mentioned in the targets. Therefore, the output aligns with the targets in most aspects, but not all, which is why a score of 4 is given.
" + ] + }, + "metadata": { + "application/vnd.databricks.v1+output": { + "addedWidgets": {}, + "aggData": [], + "aggError": "", + "aggOverflow": false, + "aggSchema": [], + "aggSeriesLimitReached": false, + "aggType": "", + "arguments": {}, + "columnCustomDisplayInfos": {}, + "data": [ + [ + "How does useEffect() work?", + "The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.", + "useEffect() is a function in React that allows you to perform side effects in a functional component. It takes two arguments: a callback function that will be run after the component renders, and an array of dependencies to determine when the callback should be invoked.", + 51, + 15.5819368362, + 12.1, + 13.5, + 4, + "The output provided by the model aligns well with the additional information provided. Both the output and the additional information explain that useEffect() is a function in React that performs actions after the component renders. The output also mentions the two arguments that useEffect() takes, which is not mentioned in the additional information. However, the output does not mention that React will remember the function passed to useEffect() and call it after performing the DOM updates, which is mentioned in the additional information. Therefore, while the output is largely accurate and aligns with the additional information in most aspects, it does not align in all significant aspects, which is why it receives a score of 4." + ], + [ + "What does the static keyword in a function mean?", + "Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.", + "In C++, the static keyword used in a function declaration means that the function is limited to the scope of the translation unit and cannot be accessed by other files. In C#, the static keyword used in a function declaration means that the function belongs to the class itself and not to any specific instance of the class.", + 62, + 10.348818779, + 12.2, + 14.7, + 4, + "The output provided by the model aligns well with the target information. It correctly explains the meaning of the static keyword in a function in both C++ and C#. However, it does not explicitly mention that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any, which is mentioned in the target information. Therefore, while the output is largely accurate and informative, it lacks some details present in the target information, which prevents it from achieving a perfect score." + ], + [ + "What does the 'finally' block in Python do?", + "'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.", + "The 'finally' block in Python is used to specify code that will always be executed, whether or not an exception is raised in the try block. This block is typically used for clean-up actions, such as closing files or releasing resources, to ensure they are always performed, regardless of any exceptions.", + 60, + 26.5383701324, + 12.1, + 14.7, + 5, + "The output provided by the model aligns very closely with the provided targets. 
Both the output and the targets explain that the 'finally' block in Python is used to specify code that will always be executed, regardless of whether an exception is raised in the try block. The output also adds that this block is typically used for clean-up actions, which is an important aspect of the 'finally' block's functionality. Therefore, the output has a high degree of semantic similarity to the provided targets, warranting a score of 5." + ], + [ + "What is the difference between multiprocessing and multithreading?", + "Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.", + "Multiprocessing is the use of multiple processors or cores to execute multiple tasks simultaneously, while multithreading is the use of multiple threads within a single process to execute different parts of the code concurrently. In simple terms, multiprocessing involves running multiple processes, while multithreading involves running multiple threads within a single process.", + 62, + 6.7716383934, + 18.2, + 21.3, + 4, + "The output provided by the model aligns closely with the provided targets. Both the output and the targets explain the difference between multiprocessing and multithreading, and they both mention that multiprocessing involves multiple processors or cores, while multithreading involves multiple threads within a single process. However, the output does not mention that each processor in multiprocessing can run one or more threads, which is mentioned in the targets. Therefore, the output aligns with the targets in most aspects, but not all, which is why a score of 4 is given." 
+ ] + ], + "datasetInfos": [], + "dbfsResultPath": null, + "isJsonSchema": true, + "metadata": {}, + "overflow": false, + "plotOptions": { + "customPlotOptions": {}, + "displayType": "table", + "pivotAggregation": null, + "pivotColumns": null, + "xColumns": null, + "yColumns": null + }, + "removedWidgets": [], + "schema": [ + { + "metadata": "{}", + "name": "inputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "ground_truth", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "outputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "token_count", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "perplexity/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "flesch_kincaid_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "ari_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "answer_similarity/v1/score", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "answer_similarity/v1/justification", + "type": "\"string\"" + } + ], + "type": "table" + } + }, + "output_type": "display_data" + } + ], + "source": [ + "results.tables[\"eval_results_table\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "85402663-b9d7-4812-a7d2-32aa5b929687", + "showTitle": false, + "title": "" + } + }, + "source": [ + "## Custom LLM-judged metric for professionalism" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "a8765226-5d95-49e8-88d8-5ba442ea3b9b", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create a custom metric that will be used to determine professionalism of the model outputs. Use `make_genai_metric` with a metric definition, grading prompt, grading example, and judge model configuration" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "45cca2ec-e06b-4d51-9dde-3cc630df9244", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "EvaluationMetric(name=professionalism, greater_is_better=True, long_name=professionalism, version=v1, metric_details=\n", + "Task:\n", + "You are an impartial judge. You will be given an input that was sent to a machine\n", + "learning model, and you will be given an output that the model produced. You\n", + "may also be given additional information that was used by the model to generate the output.\n", + "\n", + "Your task is to determine a numerical score called professionalism based on the input and output.\n", + "A definition of professionalism and a grading rubric are provided below.\n", + "You must use the grading rubric to determine your score. You must also justify your score.\n", + "\n", + "Examples could be included below for reference. Make sure to use them as references and to\n", + "understand them before completing the task.\n", + "\n", + "Input:\n", + "{input}\n", + "\n", + "Output:\n", + "{output}\n", + "\n", + "{grading_context_columns}\n", + "\n", + "Metric definition:\n", + "Professionalism refers to the use of a formal, respectful, and appropriate style of communication that is tailored to the context and audience. 
It often involves avoiding overly casual language, slang, or colloquialisms, and instead using clear, concise, and respectful language\n", + "\n", + "Grading rubric:\n", + "Professionalism: If the answer is written using a professional tone, below are the details for different scores: - Score 1: Language is extremely casual, informal, and may include slang or colloquialisms. Not suitable for professional contexts.- Score 2: Language is casual but generally respectful and avoids strong informality or slang. Acceptable in some informal professional settings.- Score 3: Language is balanced and avoids extreme informality or formality. Suitable for most professional contexts. - Score 4: Language is noticeably formal, respectful, and avoids casual elements. Appropriate for business or academic settings. - Score 5: Language is excessively formal, respectful, and avoids casual elements. Appropriate for the most formal settings such as textbooks. \n", + "\n", + "Examples:\n", + "\n", + "Input:\n", + "What is MLflow?\n", + "\n", + "Output:\n", + "MLflow is like your friendly neighborhood toolkit for managing your machine learning projects. It helps you track experiments, package your code and models, and collaborate with your team, making the whole ML workflow smoother. It's like your Swiss Army knife for machine learning!\n", + "\n", + "\n", + "\n", + "score: 2\n", + "justification: The response is written in a casual tone. It uses contractions, filler words such as 'like', and exclamation points, which make it sound less professional. \n", + " \n", + "\n", + "You must return the following fields in your response one below the other:\n", + "score: Your numerical score for the model's professionalism based on the rubric\n", + "justification: Your step-by-step reasoning about the model's professionalism score\n", + " )\n" + ] + } + ], + "source": [ + "from mlflow.metrics import EvaluationExample, make_genai_metric\n", + "\n", + "professionalism_metric = make_genai_metric(\n", + " name=\"professionalism\",\n", + " definition=(\n", + " \"Professionalism refers to the use of a formal, respectful, and appropriate style of communication that is tailored to the context and audience. It often involves avoiding overly casual language, slang, or colloquialisms, and instead using clear, concise, and respectful language\"\n", + " ),\n", + " grading_prompt=(\n", + " \"Professionalism: If the answer is written using a professional tone, below \"\n", + " \"are the details for different scores: \"\n", + " \"- Score 1: Language is extremely casual, informal, and may include slang or colloquialisms. Not suitable for professional contexts.\"\n", + " \"- Score 2: Language is casual but generally respectful and avoids strong informality or slang. Acceptable in some informal professional settings.\"\n", + " \"- Score 3: Language is balanced and avoids extreme informality or formality. Suitable for most professional contexts. \"\n", + " \"- Score 4: Language is noticeably formal, respectful, and avoids casual elements. Appropriate for business or academic settings. \"\n", + " \"- Score 5: Language is excessively formal, respectful, and avoids casual elements. Appropriate for the most formal settings such as textbooks. \"\n", + " ),\n", + " examples=[\n", + " EvaluationExample(\n", + " input=\"What is MLflow?\",\n", + " output=(\n", + " \"MLflow is like your friendly neighborhood toolkit for managing your machine learning projects. 
It helps you track experiments, package your code and models, and collaborate with your team, making the whole ML workflow smoother. It's like your Swiss Army knife for machine learning!\"\n", + " ),\n", + " score=2,\n", + " justification=(\n", + " \"The response is written in a casual tone. It uses contractions, filler words such as 'like', and exclamation points, which make it sound less professional. \"\n", + " ),\n", + " )\n", + " ],\n", + " version=\"v1\",\n", + " model=\"openai:/gpt-4\",\n", + " parameters={\"temperature\": 0.0},\n", + " grading_context_columns=[],\n", + " aggregations=[\"mean\", \"variance\", \"p90\"],\n", + " greater_is_better=True,\n", + ")\n", + "\n", + "print(professionalism_metric)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "bc615396-b1c1-4302-872d-d19be010382a", + "showTitle": false, + "title": "" + } + }, + "source": [ + "TODO: Try out your new professionalism metric on a sample output to make sure it behaves as you expect" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "0ca7e945-113a-49ac-8324-2f94efa45771", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Call `mlflow.evaluate` with your new professionalism metric. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "07bb41ae-c878-4384-b36e-3dfb9b8ac6d9", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8b7b987f9e46430fac997e9867255c5f", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Downloading artifacts: 0%| | 0/5 [00:00\n", + " .table-result-container {\n", + " max-height: 300px;\n", + " overflow: auto;\n", + " }\n", + " table, th, td {\n", + " border: 1px solid black;\n", + " border-collapse: collapse;\n", + " }\n", + " th, td {\n", + " padding: 5px;\n", + " }\n", + " th {\n", + " text-align: left;\n", + " }\n", + "
inputsground_truthoutputstoken_countperplexity/v1/scoreflesch_kincaid_grade_level/v1/scoreari_grade_level/v1/scoreprofessionalism/v1/scoreprofessionalism/v1/justification
How does useEffect() work?The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.useEffect() is a hook in React that allows you to perform side effects in functional components. It is triggered after the component is rendered and whenever the component's dependencies change, allowing you to run code at specific points during the component's lifecycle.4922.729148864712.715.04The language used in the output is formal and respectful. It avoids casual elements and provides a clear, concise explanation of the concept. It is appropriate for business or academic settings. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it doesn't score a 5.
What does the static keyword in a function mean?Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.The \"static\" keyword in a function means that the function exists only within the scope of the current file, and cannot be called from other files. It also means that the function's local variables retain their values between multiple invocations of the function.5015.908230781610.513.54The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear and concise, making it easy to understand. The use of quotation marks around the term \"static\" shows a level of formality and precision. The language is not excessively formal, so it does not reach a score of 5, but it is more formal than a balanced language, which would score a 3. Therefore, a score of 4 is appropriate.
What does the 'finally' block in Python do?'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.In Python, the 'finally' block is used to execute a set of statements whether an exception is raised or not, ensuring that certain code is always executed regardless of the outcome of a try-except block. It is commonly used for activities such as closing files or releasing resources that need to be done regardless of whether an exception occurred or not.6919.983512878414.016.74The language used in the output is formal and respectful. It avoids casual elements and provides a clear, concise explanation of the 'finally' block in Python. The tone is appropriate for business or academic settings, hence the score of 4.
What is the difference between multiprocessing and multithreading?Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.Multiprocessing involves running multiple processes simultaneously on multiple cores to increase efficiency and achieve parallelism, while multithreading involves running multiple threads within a single process, allowing for concurrent execution and sharing of resources.4019.10858726524.429.34The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear, concise, and uses technical terms appropriately. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it does not merit a score of 5.
" + ] + }, + "metadata": { + "application/vnd.databricks.v1+output": { + "addedWidgets": {}, + "aggData": [], + "aggError": "", + "aggOverflow": false, + "aggSchema": [], + "aggSeriesLimitReached": false, + "aggType": "", + "arguments": {}, + "columnCustomDisplayInfos": {}, + "data": [ + [ + "How does useEffect() work?", + "The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.", + "useEffect() is a hook in React that allows you to perform side effects in functional components. It is triggered after the component is rendered and whenever the component's dependencies change, allowing you to run code at specific points during the component's lifecycle.", + 49, + 22.7291488647, + 12.7, + 15, + 4, + "The language used in the output is formal and respectful. It avoids casual elements and provides a clear, concise explanation of the concept. It is appropriate for business or academic settings. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it doesn't score a 5." + ], + [ + "What does the static keyword in a function mean?", + "Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.", + "The \"static\" keyword in a function means that the function exists only within the scope of the current file, and cannot be called from other files. It also means that the function's local variables retain their values between multiple invocations of the function.", + 50, + 15.9082307816, + 10.5, + 13.5, + 4, + "The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear and concise, making it easy to understand. The use of quotation marks around the term \"static\" shows a level of formality and precision. The language is not excessively formal, so it does not reach a score of 5, but it is more formal than a balanced language, which would score a 3. Therefore, a score of 4 is appropriate." + ], + [ + "What does the 'finally' block in Python do?", + "'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.", + "In Python, the 'finally' block is used to execute a set of statements whether an exception is raised or not, ensuring that certain code is always executed regardless of the outcome of a try-except block. It is commonly used for activities such as closing files or releasing resources that need to be done regardless of whether an exception occurred or not.", + 69, + 19.9835128784, + 14, + 16.7, + 4, + "The language used in the output is formal and respectful. It avoids casual elements and provides a clear, concise explanation of the 'finally' block in Python. The tone is appropriate for business or academic settings, hence the score of 4." + ], + [ + "What is the difference between multiprocessing and multithreading?", + "Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. 
Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.", + "Multiprocessing involves running multiple processes simultaneously on multiple cores to increase efficiency and achieve parallelism, while multithreading involves running multiple threads within a single process, allowing for concurrent execution and sharing of resources.", + 40, + 19.108587265, + 24.4, + 29.3, + 4, + "The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear, concise, and uses technical terms appropriately. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it does not merit a score of 5." + ] + ], + "datasetInfos": [], + "dbfsResultPath": null, + "isJsonSchema": true, + "metadata": {}, + "overflow": false, + "plotOptions": { + "customPlotOptions": {}, + "displayType": "table", + "pivotAggregation": null, + "pivotColumns": null, + "xColumns": null, + "yColumns": null + }, + "removedWidgets": [], + "schema": [ + { + "metadata": "{}", + "name": "inputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "ground_truth", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "outputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "token_count", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "perplexity/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "flesch_kincaid_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "ari_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "professionalism/v1/score", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "professionalism/v1/justification", + "type": "\"string\"" + } + ], + "type": "table" + } + }, + "output_type": "display_data" + } + ], + "source": [ + "results.tables[\"eval_results_table\"]" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "52e9f69f-2f43-46ba-bf88-b4aebae741f4", + "showTitle": false, + "title": "" + } + }, + "source": [ + "The professionalism score of the `basic_qa_model` is not very good. Let's try to create a new model that can perform better" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "b4ea81e9-6e91-43e7-8539-8dab7b5f52de", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Call `mlflow.evaluate()` using the new model. Observe that the professionalism score has increased!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "5b21ef8f-50ef-4229-83c9-cc2251a081e2", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023/10/19 22:41:57 WARNING mlflow.models.model: Model logged without a signature. Signatures will be required for upcoming model registry features as they validate model inputs and denote the expected schema of model outputs. 
Please visit https://www.mlflow.org/docs/2.7.2/models.html#set-signature-on-logged-model for instructions on setting a model signature on your logged model.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "8714e76f6c9b40a2949a26cb871dfc36", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Uploading artifacts: 0%| | 0/5 [00:00\n", + " .table-result-container {\n", + " max-height: 300px;\n", + " overflow: auto;\n", + " }\n", + " table, th, td {\n", + " border: 1px solid black;\n", + " border-collapse: collapse;\n", + " }\n", + " th, td {\n", + " padding: 5px;\n", + " }\n", + " th {\n", + " text-align: left;\n", + " }\n", + "
inputsground_truthoutputstoken_countperplexity/v1/scoreflesch_kincaid_grade_level/v1/scoreari_grade_level/v1/scoreprofessionalism/v1/scoreprofessionalism/v1/justification
How does useEffect() work?The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.I would be delighted to expound upon the mechanics of the useEffect() function. useEffect() is a hook provided by the React library that facilitates the synchronization of side effects with the React components lifecycle method. It serves as a tool for performing actions that necessitate interaction with external entities, such as fetching data from an API or subscribing to events. The principal construct of useEffect() hinges on the concept of dependencies. By specifying an array of dependencies, which can consist of state variables, the useEffect() hook will execute the specified effect only when these dependencies undergo changes.\n", + "\n", + "Once the component mounts, useEffect() initially triggers the specified effect, consequently performing the designated side effect. During subsequent renders, useEffect() will carry out the cleanup operation, accommodating for the possibility of multiple effects being executed successively. Should any of the supplied dependencies undergo alteration, useEffect() will then reexecute the effect, ensuring synchronization with the altered state of the component.\n", + "\n", + "In essence, useEffect() effectively accommodates for the execution of side effects with accuracy and efficiency, aligning them with the React component lifecycle and facilitating the necessary synchronization with state updates.22025.958095550516.118.04The language used in the output is formal and respectful, with no casual elements or slang. The explanation is clear, concise, and detailed, demonstrating a high level of professionalism. However, it does not reach the level of excessive formality that would be expected in the most formal settings such as textbooks, hence the score is not 5.
What does the static keyword in a function mean?Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.The static keyword, when applied to a function within the realm of computer programming, denotes a special attribute that imparts a significant alteration to the behavior of the said function. Specifically, the static keyword, operating in this context, restricts the visibility and scope of the function to the current translation unit, consequently prohibiting its accessibility from other external units of code. Consequently, the function annotated with the static modifier may solely be invoked within the same translation unit, and any attempts to access or execute it from an external module shall be rendered futile.10731.069862365717.920.85The language used in the output is excessively formal and respectful. It avoids casual elements and is appropriate for the most formal settings such as textbooks. The response is clear, concise, and uses technical language appropriate for the subject matter. It avoids any slang or colloquialisms, and is tailored to an audience that understands the context of the question.
What does the 'finally' block in Python do?'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.The 'finally' block in Python encompasses a set of statements that are executed regardless of whether an exception occurred or not in the preceding 'try' and 'except' blocks. It serves as a reliable means to perform essential tasks, such as closing files or releasing resources, in order to uphold code integrity and adherent to best coding practices. Consequently, the 'finally' block guarantees the execution of these imperative actions, regardless of how the preceding logic unfolds, thus enhancing the robustness and fault tolerance of Python programs.10333.556705474915.518.94The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear, concise, and uses technical terms appropriately. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it does not warrant a score of 5.
What is the difference between multiprocessing and multithreading?Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.Greetings,\n", + "\n", + "I hereby address your esteemed query regarding the distinction between multiprocessing and multithreading.\n", + "\n", + "In essence, multiprocessing and multithreading serve the purpose of enhancing the efficiency and effectiveness of simultaneous execution within a computer system. However, they differ considerably in terms of their operational principles and implementation.\n", + "\n", + "To begin with, multiprocessing refers to the deployment of multiple processors or central processing units (CPUs) to execute multiple tasks or processes concurrently. Each processor operates independently and can execute distinct tasks simultaneously, thereby realizing parallel processing. This architectural approach is particularly beneficial when it comes to computationally intensive applications, as it allows for increased speed and performance.\n", + "\n", + "On the other hand, multithreading entails the notion of creating multiple threads within a single process. A thread, as a lightweight unit of execution, constitutes a sequence of instructions that can be executed independently. By employing multiple threads, a program can concurrently carry out multiple tasks within a shared memory space. Notably, these threads share the same resources and context, including memory and file systems, which can lead to potential synchronization and communication challenges.\n", + "\n", + "In summary, multiprocessing leverages multiple processors or CPUs to execute distinct tasks in parallel, whereas multithreading enables the concurrent execution of multiple threads within a single process, sharing resources and memory space. The choice between multiprocessing and multithreading depends on the specific requirements, resources, and objectives of the system or application at hand.\n", + "\n", + "I trust that this elucidation satisfactorily addresses your inquiry with regards to the disparity between multiprocessing and multithreading. Should you have any further queries or necessitate additional clarification, I remain at your disposal.\n", + "\n", + "Yours respectfully,32411.512454986616.618.85The response is written in an excessively formal and respectful tone. It uses formal greetings and sign-offs, avoids casual elements, and uses complex sentence structures. The language is appropriate for the most formal settings such as textbooks.
" + ] + }, + "metadata": { + "application/vnd.databricks.v1+output": { + "addedWidgets": {}, + "aggData": [], + "aggError": "", + "aggOverflow": false, + "aggSchema": [], + "aggSeriesLimitReached": false, + "aggType": "", + "arguments": {}, + "columnCustomDisplayInfos": {}, + "data": [ + [ + "How does useEffect() work?", + "The useEffect() hook tells React that your component needs to do something after render. React will remember the function you passed (we’ll refer to it as our “effect”), and call it later after performing the DOM updates.", + "I would be delighted to expound upon the mechanics of the useEffect() function. useEffect() is a hook provided by the React library that facilitates the synchronization of side effects with the React components lifecycle method. It serves as a tool for performing actions that necessitate interaction with external entities, such as fetching data from an API or subscribing to events. The principal construct of useEffect() hinges on the concept of dependencies. By specifying an array of dependencies, which can consist of state variables, the useEffect() hook will execute the specified effect only when these dependencies undergo changes.\n\nOnce the component mounts, useEffect() initially triggers the specified effect, consequently performing the designated side effect. During subsequent renders, useEffect() will carry out the cleanup operation, accommodating for the possibility of multiple effects being executed successively. Should any of the supplied dependencies undergo alteration, useEffect() will then reexecute the effect, ensuring synchronization with the altered state of the component.\n\nIn essence, useEffect() effectively accommodates for the execution of side effects with accuracy and efficiency, aligning them with the React component lifecycle and facilitating the necessary synchronization with state updates.", + 220, + 25.9580955505, + 16.1, + 18, + 4, + "The language used in the output is formal and respectful, with no casual elements or slang. The explanation is clear, concise, and detailed, demonstrating a high level of professionalism. However, it does not reach the level of excessive formality that would be expected in the most formal settings such as textbooks, hence the score is not 5." + ], + [ + "What does the static keyword in a function mean?", + "Static members belongs to the class, rather than a specific instance. This means that only one instance of a static member exists, even if you create multiple objects of the class, or if you don't create any. It will be shared by all objects.", + "The static keyword, when applied to a function within the realm of computer programming, denotes a special attribute that imparts a significant alteration to the behavior of the said function. Specifically, the static keyword, operating in this context, restricts the visibility and scope of the function to the current translation unit, consequently prohibiting its accessibility from other external units of code. Consequently, the function annotated with the static modifier may solely be invoked within the same translation unit, and any attempts to access or execute it from an external module shall be rendered futile.", + 107, + 31.0698623657, + 17.9, + 20.8, + 5, + "The language used in the output is excessively formal and respectful. It avoids casual elements and is appropriate for the most formal settings such as textbooks. The response is clear, concise, and uses technical language appropriate for the subject matter. 
It avoids any slang or colloquialisms, and is tailored to an audience that understands the context of the question." + ], + [ + "What does the 'finally' block in Python do?", + "'Finally' defines a block of code to run when the try... except...else block is final. The finally block will be executed no matter if the try block raises an error or not.", + "The 'finally' block in Python encompasses a set of statements that are executed regardless of whether an exception occurred or not in the preceding 'try' and 'except' blocks. It serves as a reliable means to perform essential tasks, such as closing files or releasing resources, in order to uphold code integrity and adherent to best coding practices. Consequently, the 'finally' block guarantees the execution of these imperative actions, regardless of how the preceding logic unfolds, thus enhancing the robustness and fault tolerance of Python programs.", + 103, + 33.5567054749, + 15.5, + 18.9, + 4, + "The language used in the output is formal and respectful. It avoids casual elements and is appropriate for business or academic settings. The explanation is clear, concise, and uses technical terms appropriately. However, it is not excessively formal to the point of being suitable for the most formal settings such as textbooks, hence it does not warrant a score of 5." + ], + [ + "What is the difference between multiprocessing and multithreading?", + "Multithreading refers to the ability of a processor to execute multiple threads concurrently, where each thread runs a process. Whereas multiprocessing refers to the ability of a system to run multiple processors in parallel, where each processor can run one or more threads.", + "Greetings,\n\nI hereby address your esteemed query regarding the distinction between multiprocessing and multithreading.\n\nIn essence, multiprocessing and multithreading serve the purpose of enhancing the efficiency and effectiveness of simultaneous execution within a computer system. However, they differ considerably in terms of their operational principles and implementation.\n\nTo begin with, multiprocessing refers to the deployment of multiple processors or central processing units (CPUs) to execute multiple tasks or processes concurrently. Each processor operates independently and can execute distinct tasks simultaneously, thereby realizing parallel processing. This architectural approach is particularly beneficial when it comes to computationally intensive applications, as it allows for increased speed and performance.\n\nOn the other hand, multithreading entails the notion of creating multiple threads within a single process. A thread, as a lightweight unit of execution, constitutes a sequence of instructions that can be executed independently. By employing multiple threads, a program can concurrently carry out multiple tasks within a shared memory space. Notably, these threads share the same resources and context, including memory and file systems, which can lead to potential synchronization and communication challenges.\n\nIn summary, multiprocessing leverages multiple processors or CPUs to execute distinct tasks in parallel, whereas multithreading enables the concurrent execution of multiple threads within a single process, sharing resources and memory space. 
The choice between multiprocessing and multithreading depends on the specific requirements, resources, and objectives of the system or application at hand.\n\nI trust that this elucidation satisfactorily addresses your inquiry with regards to the disparity between multiprocessing and multithreading. Should you have any further queries or necessitate additional clarification, I remain at your disposal.\n\nYours respectfully,", + 324, + 11.5124549866, + 16.6, + 18.8, + 5, + "The response is written in an excessively formal and respectful tone. It uses formal greetings and sign-offs, avoids casual elements, and uses complex sentence structures. The language is appropriate for the most formal settings such as textbooks." + ] + ], + "datasetInfos": [], + "dbfsResultPath": null, + "isJsonSchema": true, + "metadata": {}, + "overflow": false, + "plotOptions": { + "customPlotOptions": {}, + "displayType": "table", + "pivotAggregation": null, + "pivotColumns": null, + "xColumns": null, + "yColumns": null + }, + "removedWidgets": [], + "schema": [ + { + "metadata": "{}", + "name": "inputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "ground_truth", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "outputs", + "type": "\"string\"" + }, + { + "metadata": "{}", + "name": "token_count", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "perplexity/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "flesch_kincaid_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "ari_grade_level/v1/score", + "type": "\"double\"" + }, + { + "metadata": "{}", + "name": "professionalism/v1/score", + "type": "\"long\"" + }, + { + "metadata": "{}", + "name": "professionalism/v1/justification", + "type": "\"string\"" + } + ], + "type": "table" + } + }, + "output_type": "display_data" + } + ], + "source": [ + "results.tables[\"eval_results_table\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "e44bbe77-433a-4e03-a44e-d17eb6c06820", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "LLM Evaluation Examples -- QA", + "widgets": {} + }, + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From 69d391ef8bd0e7f70bd832476bedb4d7c76c2ded Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Fri, 20 Oct 2023 13:56:35 -0700 Subject: [PATCH 040/101] Making sure that eval_config values are passed to the eval_fn (#10024) Signed-off-by: Sunish Sheth --- mlflow/models/evaluation/default_evaluator.py | 11 ++++++++ tests/evaluate/test_default_evaluator.py | 25 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index a7a011958d8ad..47a35d9208eef 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1172,15 +1172,26 @@ def _get_args_for_metrics(self, extra_metric, eval_df): elif column == "metrics": eval_fn_args.append(copy.deepcopy(self.metrics_values)) else: + # case when column passed in col_mapping contains the entire column if not isinstance(column, str): eval_fn_args.append(column) + + # case column in 
col_mapping is string and the column value + # is part of the input_df elif column in input_df.columns: eval_fn_args.append(input_df[column]) + + # case column in col_mapping is string and the column value + # is part of the output_df(other than predictions) elif ( self.other_output_columns is not None and column in self.other_output_columns.columns ): eval_fn_args.append(self.other_output_columns[column]) + + # case where the param is defined as part of the evaluator_config + elif column in self.evaluator_config: + eval_fn_args.append(self.evaluator_config.get(column)) elif param.default == inspect.Parameter.empty: params_not_found.append(param_name) diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index cc8659f030469..6901af5a7584e 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3159,3 +3159,28 @@ def test_evaluate_with_correctness(): "correctness/v1/variance": 0.0, "correctness/v1/p90": 3.0, } + + +def test_evaluate_custom_metrics_string_values(): + with mlflow.start_run(): + model_info = mlflow.pyfunc.log_model( + artifact_path="model", python_model=language_model, input_example=["a", "b"] + ) + data = pd.DataFrame({"text": ["Hello world", "My name is MLflow"]}) + results = mlflow.evaluate( + model_info.model_uri, + data, + extra_metrics=[ + make_metric( + eval_fn=lambda predictions, metrics, eval_config: MetricValue( + aggregate_results={"eval_config_value_average": eval_config} + ), + name="cm", + greater_is_better=True, + long_name="custom_metric", + ) + ], + evaluators="default", + evaluator_config={"eval_config": 3}, + ) + assert results.metrics["cm/eval_config_value_average"] == 3 From 84123340b88903361b4e155f06c1ba88a2bc5dab Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Fri, 20 Oct 2023 15:28:54 -0700 Subject: [PATCH 041/101] Display OpenAI error to users for make_genai_metric (#9928) Signed-off-by: Ann Zhang --- mlflow/metrics/genai/genai_metric.py | 15 ++++++++++++++- mlflow/metrics/genai/model_utils.py | 16 +++++++++++++++- 2 files changed, 29 insertions(+), 2 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index c763ddbf5f009..54b30055c83ef 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -10,7 +10,13 @@ from mlflow.metrics.genai import model_utils from mlflow.metrics.genai.utils import _get_latest_metric_version from mlflow.models import EvaluationMetric, make_metric -from mlflow.protos.databricks_pb2 import INTERNAL_ERROR, INVALID_PARAMETER_VALUE +from mlflow.protos.databricks_pb2 import ( + BAD_REQUEST, + INTERNAL_ERROR, + INVALID_PARAMETER_VALUE, + UNAUTHENTICATED, + ErrorCode, +) from mlflow.utils.annotations import experimental from mlflow.utils.class_utils import _get_class_from_string @@ -213,6 +219,7 @@ def eval_fn( """ This is the function that is called when the metric is evaluated. """ + eval_values = dict(zip(grading_context_columns, args)) outputs = predictions.to_list() @@ -267,6 +274,12 @@ def score_model_on_one_payload( ) return _extract_score_and_justification(raw_result) except Exception as e: + if isinstance(e, MlflowException): + if e.error_code in [ + ErrorCode.Name(BAD_REQUEST), + ErrorCode.Name(UNAUTHENTICATED), + ]: + raise MlflowException(e) _logger.info(f"Failed to score model on payload. 
Error: {e!r}") return None, None diff --git a/mlflow/metrics/genai/model_utils.py b/mlflow/metrics/genai/model_utils.py index 3933727f0e51a..5643155cd9a03 100644 --- a/mlflow/metrics/genai/model_utils.py +++ b/mlflow/metrics/genai/model_utils.py @@ -5,7 +5,7 @@ import requests from mlflow.exceptions import MlflowException -from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE +from mlflow.protos.databricks_pb2 import BAD_REQUEST, INVALID_PARAMETER_VALUE, UNAUTHENTICATED from mlflow.utils.uri import append_to_uri_path ROUTE_TYPE = "llm/v1/completions" @@ -85,6 +85,20 @@ def _call_openai_api(openai_uri, payload, timeout): timeout=timeout, ).json() + if "error" in resp: + error_type = resp["error"]["type"] + if error_type == "invalid_request_error": + raise MlflowException( + f"Invalid Request to OpenAI. Error response:\n {resp}", error_code=BAD_REQUEST + ) + elif error_type == "authentication_error": + raise MlflowException( + f"Authentication Error for OpenAI. Error response:\n {resp}", + error_code=UNAUTHENTICATED, + ) + else: + raise MlflowException(f"Error response from OpenAI:\n {resp}") + return json.loads(openai_provider._prepare_completion_response_payload(resp).json()) From 21446b847794d80afd1181738bc88fb4058e4d2b Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Fri, 20 Oct 2023 17:07:11 -0700 Subject: [PATCH 042/101] Cleanup (#10056) Signed-off-by: Prithvi Kannan --- tests/evaluate/test_default_evaluator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 6901af5a7584e..14bc3b6efc74c 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3010,7 +3010,7 @@ def test_evaluate_with_latency(): model_info = mlflow.pyfunc.log_model( artifact_path="model", python_model=language_model, input_example=["a", "b"] ) - data = pd.DataFrame({"text": ["sentence not", "All women are bad."]}) + data = pd.DataFrame({"text": ["sentence not", "Hello world."]}) results = mlflow.evaluate( model_info.model_uri, data, From 32313e8a3691a64ed25d03eb5c2fbaf93eb863da Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Sat, 21 Oct 2023 11:07:57 +0900 Subject: [PATCH 043/101] Add a warning to `search_model_versions` doc (#10023) Signed-off-by: harupy --- mlflow/tracking/_model_registry/client.py | 4 ++++ mlflow/tracking/_model_registry/fluent.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/mlflow/tracking/_model_registry/client.py b/mlflow/tracking/_model_registry/client.py index 2968e7c1e324e..9167b3aaef22e 100644 --- a/mlflow/tracking/_model_registry/client.py +++ b/mlflow/tracking/_model_registry/client.py @@ -274,6 +274,10 @@ def search_model_versions( """ Search for model versions in backend that satisfy the filter criteria. + .. warning: + + The model version search results may not have aliases populated for performance reasons. + :param filter_string: A filter string expression. Currently supports a single filter condition either name of model like ``name = 'model_name'`` or ``run_id = '...'``. diff --git a/mlflow/tracking/_model_registry/fluent.py b/mlflow/tracking/_model_registry/fluent.py index d23a2b04d4b81..1c47a96afb3b3 100644 --- a/mlflow/tracking/_model_registry/fluent.py +++ b/mlflow/tracking/_model_registry/fluent.py @@ -234,6 +234,10 @@ def search_model_versions( """ Search for model versions that satisfy the filter criteria. + .. 
warning: + + The model version search results may not have aliases populated for performance reasons. + :param filter_string: Filter query string (e.g., ``"name = 'a_model_name' and tag.key = 'value1'"``), defaults to searching for all model versions. The following identifiers, comparators, From 12dcc9dcc0d5b21581dda91c377c14905e38d9d8 Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Fri, 20 Oct 2023 21:11:32 -0700 Subject: [PATCH 044/101] [Bug fix] Fixing support for numpy array for eval (#10048) Signed-off-by: Sunish Sheth --- mlflow/models/evaluation/default_evaluator.py | 32 ++++++++++++----- tests/evaluate/test_default_evaluator.py | 34 +++++++++++++++++++ 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 47a35d9208eef..1404b2c81208b 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1538,17 +1538,31 @@ def _log_eval_table(self): metric_prefix = self.evaluator_config.get("metric_prefix", "") if not isinstance(metric_prefix, str): metric_prefix = "" - if self.dataset.has_targets: - data = self.dataset.features_data.assign( - **{ - self.dataset.targets_name or "target": self.y, - self.dataset.predictions_name or "outputs": self.y_pred, - } - ) + if isinstance(self.dataset.features_data, pd.DataFrame): + # Handle DataFrame case + if self.dataset.has_targets: + data = self.dataset.features_data.assign( + **{ + self.dataset.targets_name or "target": self.y, + self.dataset.predictions_name or "outputs": self.y_pred, + } + ) + else: + data = self.dataset.features_data.assign(outputs=self.y_pred) else: - data = self.dataset.features_data.assign(outputs=self.y_pred) + # Handle NumPy array case, converting it to a DataFrame + data = pd.DataFrame(self.dataset.features_data, columns=self.dataset.feature_names) + if self.dataset.has_targets: + data = data.assign( + **{ + self.dataset.targets_name or "target": self.y, + self.dataset.predictions_name or "outputs": self.y_pred, + } + ) + else: + data = data.assign(outputs=self.y_pred) - # include other_output_columns in the eval table + # Include other_output_columns in the eval table if self.other_output_columns is not None: data = data.assign(**self.other_output_columns) diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 14bc3b6efc74c..2c55c1da84371 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3184,3 +3184,37 @@ def test_evaluate_custom_metrics_string_values(): evaluator_config={"eval_config": 3}, ) assert results.metrics["cm/eval_config_value_average"] == 3 + + +def test_evaluate_with_numpy_array(): + data = [ + ["What is MLflow?"], + ] + ground_truth = [ + "MLflow is an open-source platform for managing the end-to-end machine learning", + ] + + with mlflow.start_run(): + logged_model = mlflow.pyfunc.log_model( + artifact_path="model", python_model=language_model, input_example=["a", "b"] + ) + results = mlflow.evaluate( + logged_model.model_uri, + data, + targets=ground_truth, + extra_metrics=[mlflow.metrics.toxicity()], + ) + + assert results.metrics.keys() == { + "toxicity/v1/mean", + "toxicity/v1/variance", + "toxicity/v1/p90", + "toxicity/v1/ratio", + } + assert len(results.tables) == 1 + assert results.tables["eval_results_table"].columns.tolist() == [ + "feature_1", + "target", + "outputs", + "toxicity/v1/score", + ] From 
7f417eeb14332c0d9322c9ebe6094f86f50487be Mon Sep 17 00:00:00 2001 From: Hiroyuki Moriya <41197469+Gekko0114@users.noreply.github.com> Date: Sat, 21 Oct 2023 18:00:41 +0900 Subject: [PATCH 045/101] splits tests to run them faster for models (#10062) Signed-off-by: moriya --- .github/workflows/master.yml | 7 ++++++- conftest.py | 40 ++++++++++++++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index c4cceb991197e..6b59eeb79afb2 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -241,6 +241,11 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: ubuntu-latest timeout-minutes: 120 + strategy: + matrix: + group: [1, 2, 3] + include: + - splits: 3 steps: - uses: actions/checkout@v3 with: @@ -257,7 +262,7 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - pytest tests/models + pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} tests/models # NOTE: numpy is pinned in this suite due to its heavy reliance on shap, which internally uses # references to the now fully deprecated (as of 1.24.x) numpy types (i.e., np.bool). diff --git a/conftest.py b/conftest.py index 640680cb3dc2a..a006a1c80f298 100644 --- a/conftest.py +++ b/conftest.py @@ -27,6 +27,18 @@ def pytest_addoption(parser): default=False, help="Ignore tests for model flavors.", ) + parser.addoption( + "--splits", + default=None, + type=int, + help="The number of groups to split tests into.", + ) + parser.addoption( + "--group", + default=None, + type=int, + help="The group of tests to run.", + ) def pytest_configure(config): @@ -36,6 +48,29 @@ def pytest_configure(config): config.addinivalue_line("markers", "allow_infer_pip_requirements_fallback") +@pytest.hookimpl(tryfirst=True) +def pytest_cmdline_main(config): + group = config.getoption("group") + splits = config.getoption("splits") + + if splits is None and group is None: + return None + + if splits and group is None: + raise pytest.UsageError("`--group` is required") + + if group and splits is None: + raise pytest.UsageError("`--splits` is required") + + if splits < 0: + raise pytest.UsageError("`--splits` must be >= 1") + + if group < 1 or group > splits: + raise pytest.UsageError("`--group` must be between 1 and {splits}") + + return None + + def pytest_sessionstart(session): if uri := MLFLOW_TRACKING_URI.get(): click.echo( @@ -147,6 +182,7 @@ def pytest_ignore_collect(path, config): outcome.force_result(True) +@pytest.hookimpl(trylast=True) def pytest_collection_modifyitems(session, config, items): # pylint: disable=unused-argument # Executing `tests.server.test_prometheus_exporter` after `tests.server.test_handlers` # results in an error because Flask >= 2.2.0 doesn't allow calling setup method such as @@ -154,6 +190,10 @@ def pytest_collection_modifyitems(session, config, items): # pylint: disable=un # execute `tests.server.test_prometheus_exporter` first by reordering the test items. 
items.sort(key=lambda item: item.module.__name__ != "tests.server.test_prometheus_exporter") + # Select the tests to run based on the group and splits + if (splits := config.getoption("--splits")) and (group := config.getoption("--group")): + items[:] = items[(group - 1) :: splits] + @pytest.hookimpl(hookwrapper=True) def pytest_terminal_summary( From 2c882e37348d8c387c5cc01acd4dde74e157fe85 Mon Sep 17 00:00:00 2001 From: Dennis Liu <146855708+Dennis40816@users.noreply.github.com> Date: Sat, 21 Oct 2023 18:41:36 +0800 Subject: [PATCH 046/101] Add context manager for creating or using a spark session (#10050) Signed-off-by: Dennis Liu Signed-off-by: harupy Co-authored-by: harupy --- examples/multistep_workflow/als.py | 63 ++++---- examples/multistep_workflow/etl_data.py | 26 ++-- examples/multistep_workflow/train_keras.py | 160 +++++++++++---------- tests/models/test_signature.py | 16 +-- tests/pyfunc/test_spark.py | 2 +- tests/spark/test_spark_model_export.py | 29 ++-- tests/types/test_schema.py | 10 +- 7 files changed, 156 insertions(+), 150 deletions(-) diff --git a/examples/multistep_workflow/als.py b/examples/multistep_workflow/als.py index 0bdcb781a668a..b6fbd83ca04f5 100644 --- a/examples/multistep_workflow/als.py +++ b/examples/multistep_workflow/als.py @@ -23,45 +23,46 @@ def train_als(ratings_data, split_prop, max_iter, reg_param, rank, cold_start_strategy): seed = 42 - spark = pyspark.sql.SparkSession.builder.getOrCreate() + with pyspark.sql.SparkSession.builder.getOrCreate() as spark: + ratings_df = spark.read.parquet(ratings_data) + (training_df, test_df) = ratings_df.randomSplit([split_prop, 1 - split_prop], seed=seed) + training_df.cache() + test_df.cache() - ratings_df = spark.read.parquet(ratings_data) - (training_df, test_df) = ratings_df.randomSplit([split_prop, 1 - split_prop], seed=seed) - training_df.cache() - test_df.cache() + mlflow.log_metric("training_nrows", training_df.count()) + mlflow.log_metric("test_nrows", test_df.count()) - mlflow.log_metric("training_nrows", training_df.count()) - mlflow.log_metric("test_nrows", test_df.count()) + print(f"Training: {training_df.count()}, test: {test_df.count()}") - print(f"Training: {training_df.count()}, test: {test_df.count()}") + als = ( + ALS() + .setUserCol("userId") + .setItemCol("movieId") + .setRatingCol("rating") + .setPredictionCol("predictions") + .setMaxIter(max_iter) + .setSeed(seed) + .setRegParam(reg_param) + .setColdStartStrategy(cold_start_strategy) + .setRank(rank) + ) - als = ( - ALS() - .setUserCol("userId") - .setItemCol("movieId") - .setRatingCol("rating") - .setPredictionCol("predictions") - .setMaxIter(max_iter) - .setSeed(seed) - .setRegParam(reg_param) - .setColdStartStrategy(cold_start_strategy) - .setRank(rank) - ) + als_model = Pipeline(stages=[als]).fit(training_df) - als_model = Pipeline(stages=[als]).fit(training_df) + reg_eval = RegressionEvaluator( + predictionCol="predictions", labelCol="rating", metricName="mse" + ) - reg_eval = RegressionEvaluator(predictionCol="predictions", labelCol="rating", metricName="mse") + predicted_test_dF = als_model.transform(test_df) - predicted_test_dF = als_model.transform(test_df) + test_mse = reg_eval.evaluate(predicted_test_dF) + train_mse = reg_eval.evaluate(als_model.transform(training_df)) - test_mse = reg_eval.evaluate(predicted_test_dF) - train_mse = reg_eval.evaluate(als_model.transform(training_df)) - - print(f"The model had a MSE on the test set of {test_mse}") - print(f"The model had a MSE on the (train) set of {train_mse}") - 
mlflow.log_metric("test_mse", test_mse) - mlflow.log_metric("train_mse", train_mse) - mlflow.spark.log_model(als_model, "als-model") + print(f"The model had a MSE on the test set of {test_mse}") + print(f"The model had a MSE on the (train) set of {train_mse}") + mlflow.log_metric("test_mse", test_mse) + mlflow.log_metric("train_mse", train_mse) + mlflow.spark.log_model(als_model, "als-model") if __name__ == "__main__": diff --git a/examples/multistep_workflow/etl_data.py b/examples/multistep_workflow/etl_data.py index 2dda4756831e7..eda48e459b009 100644 --- a/examples/multistep_workflow/etl_data.py +++ b/examples/multistep_workflow/etl_data.py @@ -22,20 +22,20 @@ def etl_data(ratings_csv, max_row_limit): with mlflow.start_run(): tmpdir = tempfile.mkdtemp() ratings_parquet_dir = os.path.join(tmpdir, "ratings-parquet") - spark = pyspark.sql.SparkSession.builder.getOrCreate() print(f"Converting ratings CSV {ratings_csv} to Parquet {ratings_parquet_dir}") - ratings_df = ( - spark.read.option("header", "true") - .option("inferSchema", "true") - .csv(ratings_csv) - .drop("timestamp") - ) # Drop unused column - ratings_df.show() - if max_row_limit != -1: - ratings_df = ratings_df.limit(max_row_limit) - ratings_df.write.parquet(ratings_parquet_dir) - print(f"Uploading Parquet ratings: {ratings_parquet_dir}") - mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir") + with pyspark.sql.SparkSession.builder.getOrCreate() as spark: + ratings_df = ( + spark.read.option("header", "true") + .option("inferSchema", "true") + .csv(ratings_csv) + .drop("timestamp") + ) # Drop unused column + ratings_df.show() + if max_row_limit != -1: + ratings_df = ratings_df.limit(max_row_limit) + ratings_df.write.parquet(ratings_parquet_dir) + print(f"Uploading Parquet ratings: {ratings_parquet_dir}") + mlflow.log_artifacts(ratings_parquet_dir, "ratings-parquet-dir") if __name__ == "__main__": diff --git a/examples/multistep_workflow/train_keras.py b/examples/multistep_workflow/train_keras.py index d59fabc5975cd..7f2c091c509a3 100644 --- a/examples/multistep_workflow/train_keras.py +++ b/examples/multistep_workflow/train_keras.py @@ -29,85 +29,87 @@ def train_keras(ratings_data, als_model_uri, hidden_units): np.random.seed(0) tf.set_random_seed(42) # For reproducibility - spark = pyspark.sql.SparkSession.builder.getOrCreate() - als_model = mlflow.spark.load_model(als_model_uri).stages[0] - - ratings_df = spark.read.parquet(ratings_data) - - (training_df, test_df) = ratings_df.randomSplit([0.8, 0.2], seed=42) - training_df.cache() - test_df.cache() - - mlflow.log_metric("training_nrows", training_df.count()) - mlflow.log_metric("test_nrows", test_df.count()) - - print(f"Training: {training_df.count()}, test: {test_df.count()}") - - user_factors = als_model.userFactors.selectExpr("id as userId", "features as uFeatures") - item_factors = als_model.itemFactors.selectExpr("id as movieId", "features as iFeatures") - joined_train_df = training_df.join(item_factors, on="movieId").join(user_factors, on="userId") - joined_test_df = test_df.join(item_factors, on="movieId").join(user_factors, on="userId") - - # We'll combine the movies and ratings vectors into a single vector of length 24. - # We will then explode this features vector into a set of columns. 
- def concat_arrays(*args): - return list(chain(*args)) - - concat_arrays_udf = udf(concat_arrays, ArrayType(FloatType())) - - concat_train_df = joined_train_df.select( - "userId", - "movieId", - concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"), - col("rating").cast("float"), - ) - concat_test_df = joined_test_df.select( - "userId", - "movieId", - concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"), - col("rating").cast("float"), - ) - - pandas_df = concat_train_df.toPandas() - pandas_test_df = concat_test_df.toPandas() - - # This syntax will create a new DataFrame where elements of the 'features' vector - # are each in their own column. This is what we'll train our neural network on. - x_test = pd.DataFrame(pandas_test_df.features.values.tolist(), index=pandas_test_df.index) - x_train = pd.DataFrame(pandas_df.features.values.tolist(), index=pandas_df.index) - - # Show matrix for example. - print("Training matrix:") - print(x_train) - - # Create our Keras model with two fully connected hidden layers. - model = Sequential() - model.add(Dense(30, input_dim=24, activation="relu")) - model.add(Dense(hidden_units, activation="relu")) - model.add(Dense(1, activation="linear")) - - model.compile(loss="mse", optimizer=keras.optimizers.Adam(lr=0.0001)) - - early_stopping = EarlyStopping(monitor="val_loss", min_delta=0.0001, patience=2, mode="auto") - - model.fit( - x_train, - pandas_df["rating"], - validation_split=0.2, - verbose=2, - epochs=3, - batch_size=128, - shuffle=False, - callbacks=[early_stopping], - ) - - train_mse = model.evaluate(x_train, pandas_df["rating"], verbose=2) - test_mse = model.evaluate(x_test, pandas_test_df["rating"], verbose=2) - mlflow.log_metric("test_mse", test_mse) - mlflow.log_metric("train_mse", train_mse) - - print(f"The model had a MSE on the test set of {test_mse}") - mlflow.tensorflow.log_model(model, "keras-model") + with pyspark.sql.SparkSession.builder.getOrCreate() as spark: + als_model = mlflow.spark.load_model(als_model_uri).stages[0] + ratings_df = spark.read.parquet(ratings_data) + (training_df, test_df) = ratings_df.randomSplit([0.8, 0.2], seed=42) + training_df.cache() + test_df.cache() + + mlflow.log_metric("training_nrows", training_df.count()) + mlflow.log_metric("test_nrows", test_df.count()) + + print(f"Training: {training_df.count()}, test: {test_df.count()}") + + user_factors = als_model.userFactors.selectExpr("id as userId", "features as uFeatures") + item_factors = als_model.itemFactors.selectExpr("id as movieId", "features as iFeatures") + joined_train_df = training_df.join(item_factors, on="movieId").join( + user_factors, on="userId" + ) + joined_test_df = test_df.join(item_factors, on="movieId").join(user_factors, on="userId") + + # We'll combine the movies and ratings vectors into a single vector of length 24. + # We will then explode this features vector into a set of columns. 
+ def concat_arrays(*args): + return list(chain(*args)) + + concat_arrays_udf = udf(concat_arrays, ArrayType(FloatType())) + + concat_train_df = joined_train_df.select( + "userId", + "movieId", + concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"), + col("rating").cast("float"), + ) + concat_test_df = joined_test_df.select( + "userId", + "movieId", + concat_arrays_udf(col("iFeatures"), col("uFeatures")).alias("features"), + col("rating").cast("float"), + ) + + pandas_df = concat_train_df.toPandas() + pandas_test_df = concat_test_df.toPandas() + + # This syntax will create a new DataFrame where elements of the 'features' vector + # are each in their own column. This is what we'll train our neural network on. + x_test = pd.DataFrame(pandas_test_df.features.values.tolist(), index=pandas_test_df.index) + x_train = pd.DataFrame(pandas_df.features.values.tolist(), index=pandas_df.index) + + # Show matrix for example. + print("Training matrix:") + print(x_train) + + # Create our Keras model with two fully connected hidden layers. + model = Sequential() + model.add(Dense(30, input_dim=24, activation="relu")) + model.add(Dense(hidden_units, activation="relu")) + model.add(Dense(1, activation="linear")) + + model.compile(loss="mse", optimizer=keras.optimizers.Adam(lr=0.0001)) + + early_stopping = EarlyStopping( + monitor="val_loss", min_delta=0.0001, patience=2, mode="auto" + ) + + model.fit( + x_train, + pandas_df["rating"], + validation_split=0.2, + verbose=2, + epochs=3, + batch_size=128, + shuffle=False, + callbacks=[early_stopping], + ) + + train_mse = model.evaluate(x_train, pandas_df["rating"], verbose=2) + test_mse = model.evaluate(x_test, pandas_test_df["rating"], verbose=2) + mlflow.log_metric("test_mse", test_mse) + mlflow.log_metric("train_mse", train_mse) + + print(f"The model had a MSE on the test set of {test_mse}") + mlflow.tensorflow.log_model(model, "keras-model") if __name__ == "__main__": diff --git a/tests/models/test_signature.py b/tests/models/test_signature.py index 78c1f38484abd..d8525d6bcced0 100644 --- a/tests/models/test_signature.py +++ b/tests/models/test_signature.py @@ -163,14 +163,14 @@ def test_signature_inference_infers_datime_types_as_expected(): signature = infer_signature(test_df) assert signature.inputs == Schema([ColSpec(DataType.datetime, name=col_name)]) - spark = pyspark.sql.SparkSession.builder.getOrCreate() - spark_df = spark.range(1).selectExpr( - "current_timestamp() as timestamp", "current_date() as date" - ) - signature = infer_signature(spark_df) - assert signature.inputs == Schema( - [ColSpec(DataType.datetime, name="timestamp"), ColSpec(DataType.datetime, name="date")] - ) + with pyspark.sql.SparkSession.builder.getOrCreate() as spark: + spark_df = spark.range(1).selectExpr( + "current_timestamp() as timestamp", "current_date() as date" + ) + signature = infer_signature(spark_df) + assert signature.inputs == Schema( + [ColSpec(DataType.datetime, name="timestamp"), ColSpec(DataType.datetime, name="date")] + ) def test_set_signature_to_logged_model(): diff --git a/tests/pyfunc/test_spark.py b/tests/pyfunc/test_spark.py index 6d7cdf1bfd20f..53affa8c6d469 100644 --- a/tests/pyfunc/test_spark.py +++ b/tests/pyfunc/test_spark.py @@ -58,7 +58,7 @@ def score_model_as_udf(model_uri, pandas_df, result_type="double"): - spark = get_spark_session(pyspark.SparkConf()) + spark = pyspark.sql.SparkSession.getActiveSession() spark_df = spark.createDataFrame(pandas_df).coalesce(1) pyfunc_udf = spark_udf( spark=spark, model_uri=model_uri, 
result_type=result_type, env_manager="local" diff --git a/tests/spark/test_spark_model_export.py b/tests/spark/test_spark_model_export.py index ada83712ce04e..7793fbaf63542 100644 --- a/tests/spark/test_spark_model_export.py +++ b/tests/spark/test_spark_model_export.py @@ -72,21 +72,23 @@ def spark_custom_env(tmp_path): def _get_spark_session_with_retry(max_tries=3): conf = pyspark.SparkConf() - for num_tries in range(max_tries): + for attempt in range(max_tries): try: return get_spark_session(conf) - except Exception: - if num_tries >= max_tries - 1: + except Exception as e: + if attempt >= max_tries - 1: raise - _logger.exception(f"Attempt {num_tries} to create a SparkSession failed, retrying...") + _logger.exception( + f"Attempt {attempt} to create a SparkSession failed ({e!r}), retrying..." + ) # Specify `autouse=True` to ensure that a context is created # before any tests are executed. This ensures that the Hadoop filesystem # does not create its own SparkContext without the MLeap libraries required by # other tests. -@pytest.fixture(scope="module", autouse=True) -def spark_context(): +@pytest.fixture(scope="module") +def spark(): if Version(pyspark.__version__) < Version("3.1"): # A workaround for this issue: # https://stackoverflow.com/questions/62109276/errorjava-lang-unsupportedoperationexception-for-pyspark-pandas-udf-documenta @@ -103,9 +105,9 @@ def spark_context(): spark.executor.extraJavaOptions="-Dio.netty.tryReflectionSetAccessible=true" """ f.write(conf) - spark = _get_spark_session_with_retry() - yield spark.sparkContext - spark.stop() + + with _get_spark_session_with_retry() as spark: + yield spark def iris_pandas_df(): @@ -119,11 +121,10 @@ def iris_pandas_df(): @pytest.fixture(scope="module") -def iris_df(spark_context): +def iris_df(spark): pdf = iris_pandas_df() feature_names = list(pdf.drop("label", axis=1).columns) - spark_session = pyspark.sql.SparkSession(spark_context) - iris_spark_df = spark_session.createDataFrame(pdf) + iris_spark_df = spark.createDataFrame(pdf) return feature_names, pdf, iris_spark_df @@ -170,8 +171,7 @@ def spark_model_transformer(iris_df): @pytest.fixture(scope="module") -def spark_model_estimator(iris_df, spark_context): - # pylint: disable=unused-argument +def spark_model_estimator(iris_df): feature_names, iris_pandas_df, iris_spark_df = iris_df assembler = VectorAssembler(inputCols=feature_names, outputCol="features") features_df = assembler.transform(iris_spark_df) @@ -190,6 +190,7 @@ def model_path(tmp_path): return os.path.join(tmp_path, "model") +@pytest.mark.usefixtures("spark") def test_hadoop_filesystem(tmp_path): # copy local dir to and back from HadoopFS and make sure the results match from mlflow.spark import _HadoopFileSystem as FS diff --git a/tests/types/test_schema.py b/tests/types/test_schema.py index 79c7538ca403d..ecde1398aac85 100644 --- a/tests/types/test_schema.py +++ b/tests/types/test_schema.py @@ -632,10 +632,12 @@ def test_spark_type_mapping(pandas_df_with_all_types): ) actual_spark_schema = schema.as_spark_schema() assert expected_spark_schema.jsonValue() == actual_spark_schema.jsonValue() - spark_session = pyspark.sql.SparkSession(pyspark.SparkContext.getOrCreate()) - sparkdf = spark_session.createDataFrame(pandas_df_with_all_types, schema=actual_spark_schema) - schema2 = _infer_schema(sparkdf) - assert schema == schema2 + with pyspark.sql.SparkSession(pyspark.SparkContext.getOrCreate()) as spark_session: + sparkdf = spark_session.createDataFrame( + pandas_df_with_all_types, schema=actual_spark_schema + ) + 
schema2 = _infer_schema(sparkdf) + assert schema == schema2 # test unnamed columns schema = Schema([ColSpec(col.type) for col in schema.inputs]) From 5dec084dea19061a8e391fe91654a63c8c95f27d Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Sun, 22 Oct 2023 16:10:21 -0700 Subject: [PATCH 047/101] Surface error "missing targets column" early (#10020) Signed-off-by: Ann Zhang --- mlflow/metrics/metric_definitions.py | 38 ++++++++--------- mlflow/models/evaluation/default_evaluator.py | 41 ++++++++++--------- tests/evaluate/test_default_evaluator.py | 16 ++++++-- 3 files changed, 53 insertions(+), 42 deletions(-) diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 46a3937e77c91..479f77004e1d5 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -33,7 +33,7 @@ def _validate_text_data(data, metric_name, column_name): return True -def _token_count_eval_fn(predictions, targets, metrics): +def _token_count_eval_fn(predictions, targets=None, metrics=None): import tiktoken # ref: https://github.com/openai/tiktoken/issues/75 @@ -59,7 +59,7 @@ def _cached_evaluate_load(path, module_type=None): return evaluate.load(path, module_type=module_type) -def _toxicity_eval_fn(predictions, targets, metrics): +def _toxicity_eval_fn(predictions, targets=None, metrics=None): if not _validate_text_data(predictions, "toxicity", "predictions"): return try: @@ -83,7 +83,7 @@ def _toxicity_eval_fn(predictions, targets, metrics): ) -def _perplexity_eval_fn(predictions, targets, metrics): +def _perplexity_eval_fn(predictions, targets=None, metrics=None): if not _validate_text_data(predictions, "perplexity", "predictions"): return @@ -102,7 +102,7 @@ def _perplexity_eval_fn(predictions, targets, metrics): ) -def _flesch_kincaid_eval_fn(predictions, targets, metrics): +def _flesch_kincaid_eval_fn(predictions, targets=None, metrics=None): if not _validate_text_data(predictions, "flesch_kincaid", "predictions"): return @@ -119,7 +119,7 @@ def _flesch_kincaid_eval_fn(predictions, targets, metrics): ) -def _ari_eval_fn(predictions, targets, metrics): +def _ari_eval_fn(predictions, targets=None, metrics=None): if not _validate_text_data(predictions, "ari", "predictions"): return @@ -138,7 +138,7 @@ def _ari_eval_fn(predictions, targets, metrics): ) -def _accuracy_eval_fn(predictions, targets, metrics, sample_weight=None): +def _accuracy_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import accuracy_score @@ -146,7 +146,7 @@ def _accuracy_eval_fn(predictions, targets, metrics, sample_weight=None): return MetricValue(aggregate_results={"exact_match": acc}) -def _rouge1_eval_fn(predictions, targets, metrics): +def _rouge1_eval_fn(predictions, targets=None, metrics=None): if targets is not None and len(targets) != 0: if not _validate_text_data(targets, "rouge1", "targets") or not _validate_text_data( predictions, "rouge1", "predictions" @@ -173,7 +173,7 @@ def _rouge1_eval_fn(predictions, targets, metrics): ) -def _rouge2_eval_fn(predictions, targets, metrics): +def _rouge2_eval_fn(predictions, targets=None, metrics=None): if targets is not None and len(targets) != 0: if not _validate_text_data(targets, "rouge2", "targets") or not _validate_text_data( predictions, "rouge2", "predictions" @@ -200,7 +200,7 @@ def _rouge2_eval_fn(predictions, targets, metrics): ) -def _rougeL_eval_fn(predictions, targets, metrics): +def _rougeL_eval_fn(predictions, targets=None, 
metrics=None): if targets is not None and len(targets) != 0: if not _validate_text_data(targets, "rougeL", "targets") or not _validate_text_data( predictions, "rougeL", "predictions" @@ -227,7 +227,7 @@ def _rougeL_eval_fn(predictions, targets, metrics): ) -def _rougeLsum_eval_fn(predictions, targets, metrics): +def _rougeLsum_eval_fn(predictions, targets=None, metrics=None): if targets is not None and len(targets) != 0: if not _validate_text_data(targets, "rougeLsum", "targets") or not _validate_text_data( predictions, "rougeLsum", "predictions" @@ -254,7 +254,7 @@ def _rougeLsum_eval_fn(predictions, targets, metrics): ) -def _mae_eval_fn(predictions, targets, metrics, sample_weight=None): +def _mae_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import mean_absolute_error @@ -262,7 +262,7 @@ def _mae_eval_fn(predictions, targets, metrics, sample_weight=None): return MetricValue(aggregate_results={"mean_absolute_error": mae}) -def _mse_eval_fn(predictions, targets, metrics, sample_weight=None): +def _mse_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import mean_squared_error @@ -270,7 +270,7 @@ def _mse_eval_fn(predictions, targets, metrics, sample_weight=None): return MetricValue(aggregate_results={"mean_squared_error": mse}) -def _rmse_eval_fn(predictions, targets, metrics, sample_weight=None): +def _rmse_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import mean_squared_error @@ -278,7 +278,7 @@ def _rmse_eval_fn(predictions, targets, metrics, sample_weight=None): return MetricValue(aggregate_results={"root_mean_squared_error": rmse}) -def _r2_score_eval_fn(predictions, targets, metrics, sample_weight=None): +def _r2_score_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import r2_score @@ -286,7 +286,7 @@ def _r2_score_eval_fn(predictions, targets, metrics, sample_weight=None): return MetricValue(aggregate_results={"r2_score": r2}) -def _max_error_eval_fn(predictions, targets, metrics): +def _max_error_eval_fn(predictions, targets=None, metrics=None): if targets is not None and len(targets) != 0: from sklearn.metrics import max_error @@ -294,7 +294,7 @@ def _max_error_eval_fn(predictions, targets, metrics): return MetricValue(aggregate_results={"max_error": error}) -def _mape_eval_fn(predictions, targets, metrics, sample_weight=None): +def _mape_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): if targets is not None and len(targets) != 0: from sklearn.metrics import mean_absolute_percentage_error @@ -303,7 +303,7 @@ def _mape_eval_fn(predictions, targets, metrics, sample_weight=None): def _recall_eval_fn( - predictions, targets, metrics, pos_label=1, average="binary", sample_weight=None + predictions, targets=None, metrics=None, pos_label=1, average="binary", sample_weight=None ): if targets is not None and len(targets) != 0: from sklearn.metrics import recall_score @@ -315,7 +315,7 @@ def _recall_eval_fn( def _precision_eval_fn( - predictions, targets, metrics, pos_label=1, average="binary", sample_weight=None + predictions, targets=None, metrics=None, pos_label=1, average="binary", sample_weight=None ): if targets is not None and len(targets) != 0: from sklearn.metrics import precision_score @@ -331,7 +331,7 @@ 
def _precision_eval_fn( def _f1_score_eval_fn( - predictions, targets, metrics, pos_label=1, average="binary", sample_weight=None + predictions, targets=None, metrics=None, pos_label=1, average="binary", sample_weight=None ): if targets is not None and len(targets) != 0: from sklearn.metrics import f1_score diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 1404b2c81208b..0ace3417b9b4a 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1168,7 +1168,10 @@ def _get_args_for_metrics(self, extra_metric, eval_df): if "target" in eval_df_copy: eval_fn_args.append(eval_df_copy["target"]) else: - eval_fn_args.append(None) + if param.default == inspect.Parameter.empty: + params_not_found.append(param_name) + else: + eval_fn_args.append(param.default) elif column == "metrics": eval_fn_args.append(copy.deepcopy(self.metrics_values)) else: @@ -1194,6 +1197,8 @@ def _get_args_for_metrics(self, extra_metric, eval_df): eval_fn_args.append(self.evaluator_config.get(column)) elif param.default == inspect.Parameter.empty: params_not_found.append(param_name) + else: + eval_fn_args.append(param.default) if len(params_not_found) > 0: return extra_metric.name, params_not_found @@ -1434,29 +1439,27 @@ def _check_args(self, metrics, eval_df): failed_metrics.append(result) if len(failed_metrics) > 0: - output_column_name = self.predictions output_columns = ( [] if self.other_output_columns is None else list(self.other_output_columns.columns) ) input_columns = list(self.X.copy_to_avoid_mutation().columns) - error_messages = [] - for metric_name, param_names in failed_metrics: - error_messages.append(f"Metric '{metric_name}' requires the columns {param_names}") - error_message = "\n".join(error_messages) - raise MlflowException( - "Error: Metric calculation failed for the following metrics:\n" - f"{error_message}\n\n" - "Below are the existing column names for the input/output data:\n" - f"Input Columns: {input_columns}\n" - f"Output Columns: {output_columns}\n" - "Note that this does not include the output column: " - f"'{output_column_name}'\n\n" - f"To resolve this issue, you may want to map the missing column to an " - "existing column using the following configuration:\n" - f"evaluator_config={{'col_mapping': {{'': " - "''}}\n" - ) + error_messages = [ + f"Metric '{metric_name}' requires the columns {param_names}" + for metric_name, param_names in failed_metrics + ] + joined_error_message = "\n".join(error_messages) + full_message = f"""Error: Metric calculation failed for the following metrics: + {joined_error_message} + + Below are the existing column names for the input/output data: + Input Columns: {input_columns} + Output Columns: {output_columns} + To resolve this issue, you may want to map the missing column to an existing column + using the following configuration: + evaluator_config={{'col_mapping': {{: }}}}""" + stripped_message = "\n".join(l.lstrip() for l in full_message.splitlines()) + raise MlflowException(stripped_message) def _test_first_row(self, eval_df): # test calculations on first row of eval_df diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 2c55c1da84371..5e654f852ce5a 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2192,9 +2192,17 @@ def dummy_fn2(param_3, param_4, builtin_metrics): metric_1 = make_metric(name="metric_1", eval_fn=dummy_fn1, 
greater_is_better=True) metric_2 = make_metric(name="metric_2", eval_fn=dummy_fn2, greater_is_better=True) - error_message = "Error: Metric calculation failed for the following metrics:\nMetric 'metric_1'" - " requires the columns ['param_1', 'param_2']\n\nMetric 'metric_2' requires the columns " - "['param_3', 'builtin_metrics']\n" + error_message = ( + r"Error: Metric calculation failed for the following metrics:\n" + r"Metric 'metric_1' requires the columns \['param_1', 'param_2'\]\n" + r"Metric 'metric_2' requires the columns \['param_3', 'builtin_metrics'\]\n\n" + r"Below are the existing column names for the input/output data:\n" + r"Input Columns: \['question'\]\n" + r"Output Columns: \[\]\n" + r"To resolve this issue, you may want to map the missing column to an existing column\n" + r"using the following configuration:\n" + r"evaluator_config=\{'col_mapping': \{: \}\}" + ) with pytest.raises( MlflowException, @@ -2876,7 +2884,7 @@ def test_evaluate_no_model_type_with_custom_metric(): from mlflow.metrics import make_metric from mlflow.metrics.metric_definitions import standard_aggregations - def word_count_eval(predictions, targets, metrics): + def word_count_eval(predictions, targets=None, metrics=None): scores = [] for prediction in predictions: scores.append(len(prediction.split(" "))) From 3b81d43ac6bc74fc30295d69b96bda1822cbffc5 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Sun, 22 Oct 2023 17:29:45 -0700 Subject: [PATCH 048/101] RAG example notebook (#10054) Signed-off-by: Prithvi Kannan --- .../LLM Evaluation Examples -- RAG.ipynb | 604 ++++++++++++++++++ 1 file changed, 604 insertions(+) create mode 100644 examples/evaluation/LLM Evaluation Examples -- RAG.ipynb diff --git a/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb b/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb new file mode 100644 index 0000000000000..1269c073d450e --- /dev/null +++ b/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb @@ -0,0 +1,604 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "42084110-295b-493a-9b3e-5d8d29ff78b3", + "showTitle": false, + "title": "" + } + }, + "source": [ + "# LLM RAG Evaluation with MLflow Example Notebook\n", + "\n", + "In this notebook, we will demonstrate how to evaluate various a RAG system with MLflow." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "import os" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "bdff35e3-0e09-48b8-87ce-78759de88998", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Set OpenAI Key" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "bec25067-224d-4ee8-9b5d-0beeb6cde684", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "os.environ[\"OPENAI_API_KEY\"] = \"redacted\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "fb946228-62fb-4d68-9732-75935c9cb401", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "import pandas as pd\n", + "\n", + "import mlflow" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "273d1345-95d7-435a-a7b6-a5f3dbb3f073", + "showTitle": false, + "title": "" + } + }, + "source": [ + "## Create a RAG system\n", + "\n", + "Use Langchain and Chroma to create a RAG system that answers questions based on the MLflow documentation." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "2c28d0ad-f469-46ab-a2b4-c5e8db50a729", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "from langchain.chains import RetrievalQA\n", + "from langchain.document_loaders import WebBaseLoader\n", + "from langchain.embeddings.openai import OpenAIEmbeddings\n", + "from langchain.llms import OpenAI\n", + "from langchain.text_splitter import CharacterTextSplitter\n", + "from langchain.vectorstores import Chroma" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "83a7e77e-6717-472a-86dc-02e2c356ddef", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "loader = WebBaseLoader(\"https://mlflow.org/docs/latest/index.html\")\n", + "\n", + "documents = loader.load()\n", + "text_splitter = CharacterTextSplitter(chunk_size=1000, chunk_overlap=0)\n", + "texts = text_splitter.split_documents(documents)\n", + "\n", + "embeddings = OpenAIEmbeddings()\n", + "docsearch = Chroma.from_documents(texts, embeddings)\n", + "\n", + "qa = RetrievalQA.from_chain_type(\n", + " llm=OpenAI(temperature=0),\n", + " chain_type=\"stuff\",\n", + " retriever=docsearch.as_retriever(),\n", + " return_source_documents=True,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "fd70bcf6-7c44-44d3-9435-567b82611e1c", + "showTitle": false, + "title": "" + } + }, + "source": [ + "## Evaluate the RAG system using `mlflow.evaluate()`" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + 
"inputWidgets": {}, + "nuid": "de1bc359-2e40-459c-bea4-bed35a117988", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create a simple function that runs each input through the RAG chain" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "667ec809-2bb5-4170-9937-6804386b41ec", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "def model(input_df):\n", + " answer = []\n", + " for index, row in input_df.iterrows():\n", + " answer.append(qa(row[\"questions\"]))\n", + "\n", + " return answer" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "d1064306-b7f3-4b3e-825c-4353d808f21d", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create an eval dataset" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "a5481491-e4a9-42ea-8a3f-f527faffd04d", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "eval_df = pd.DataFrame(\n", + " {\n", + " \"questions\": [\n", + " \"What is MLflow?\",\n", + " \"How to run Mlflow.evalaute()?\",\n", + " \"How to log_table()?\",\n", + " \"How to load_table()?\",\n", + " ],\n", + " }\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "9c3c8023-8feb-427a-b36d-34cd1853a5dc", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Create a relevance metric" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "3882b940-9c25-41ce-a301-72d8c0c90aaa", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "from mlflow.metrics.genai.metric_definitions import relevance\n", + "\n", + "relevance_metric = relevance(model=\"openai:/gpt-4\")" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "ea40ce52-6ac7-4c20-9669-d24f80a6cebe", + "showTitle": false, + "title": "" + } + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023/10/20 13:50:31 INFO mlflow.models.evaluation.base: Evaluating the model with the default evaluator.\n", + "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", + "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", + "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", + "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", + "2023/10/20 13:50:38 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: token_count\n", + "2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Computing token count metric:\n", + "2023/10/20 13:50:38 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: toxicity\n", + 
"2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Loading toxicity metric:\n", + "2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Computing toxicity metric:\n", + "2023/10/20 13:50:39 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: perplexity\n", + "2023/10/20 13:50:39 INFO mlflow.metrics.metric_definitions: Loading perplexity metric:\n", + "2023/10/20 13:50:39 INFO mlflow.metrics.metric_definitions: Computing perplexity metric:\n", + "Using pad_token, but it is not set yet.\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "a85dd3a9e9b84b71a61b5699b06a2775", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00\n", + "\n", + "\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + "" + ], + "text/plain": [ + " questions \\\n", + "0 What is MLflow? \n", + "1 How to run Mlflow.evalaute()? \n", + "2 How to log_table()? \n", + "3 How to load_table()? \n", + "\n", + " outputs \\\n", + "0 MLflow is an open source platform for managin... \n", + "1 You can use the Mlflow.evaluate() function to... \n", + "2 log_table() is a function that is part of the... \n", + "3 load_table() is not a function in MLflow. \n", + "\n", + " query \\\n", + "0 What is MLflow? \n", + "1 How to run Mlflow.evalaute()? \n", + "2 How to log_table()? \n", + "3 How to load_table()? \n", + "\n", + " source_documents latency token_count \\\n", + "0 [{'lc_attributes': {}, 'lc_namespace': ['langc... 4.571291 176 \n", + "1 [{'lc_attributes': {}, 'lc_namespace': ['langc... 1.253230 48 \n", + "2 [{'lc_attributes': {}, 'lc_namespace': ['langc... 1.217061 47 \n", + "3 [{'lc_attributes': {}, 'lc_namespace': ['langc... 0.680665 11 \n", + "\n", + " toxicity/v1/score perplexity/v1/score \\\n", + "0 0.000208 28.626591 \n", + "1 0.000263 21.149670 \n", + "2 0.000145 23.411400 \n", + "3 0.000144 193.916275 \n", + "\n", + " flesch_kincaid_grade_level/v1/score ari_grade_level/v1/score \\\n", + "0 15.4 18.9 \n", + "1 7.0 6.0 \n", + "2 7.1 7.7 \n", + "3 2.5 5.6 \n", + "\n", + " relevance/v1/score relevance/v1/justification \n", + "0 5 The output provides a comprehensive answer to ... \n", + "1 5 The output provides a comprehensive answer to ... \n", + "2 5 The output provides a comprehensive answer to ... \n", + "3 5 The output directly and accurately answers the... " + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "results.tables[\"eval_results_table\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "LLM Evaluation Examples -- RAG", + "widgets": {} + }, + "kernelspec": { + "display_name": "mlflow-dev-env", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.13" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} From b777cf39d624c707311b236f86ca08fcc7d4e7b9 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Sun, 22 Oct 2023 17:44:37 -0700 Subject: [PATCH 049/101] Update default model to gpt-4 (#10055) Signed-off-by: Ann Zhang --- mlflow/metrics/genai/genai_metric.py | 8 ++++---- mlflow/metrics/genai/metric_definitions.py | 6 +++--- mlflow/metrics/genai/prompts/v1.py | 2 +- mlflow/metrics/genai/utils.py | 4 ++++ tests/metrics/genai/prompts/test_v1.py | 2 +- tests/metrics/genai/test_genai_metrics.py | 2 +- 6 files changed, 14 insertions(+), 10 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index 54b30055c83ef..a57b0887296ea 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -8,7 +8,7 @@ from mlflow.exceptions import MlflowException from mlflow.metrics.base import EvaluationExample, MetricValue from mlflow.metrics.genai import model_utils -from mlflow.metrics.genai.utils import _get_latest_metric_version +from mlflow.metrics.genai.utils import _get_default_model, 
_get_latest_metric_version from mlflow.models import EvaluationMetric, make_metric from mlflow.protos.databricks_pb2 import ( BAD_REQUEST, @@ -89,7 +89,7 @@ def make_genai_metric( grading_prompt: str, examples: Optional[List[EvaluationExample]] = None, version: Optional[str] = _get_latest_metric_version(), - model: Optional[str] = "openai:/gpt-3.5-turbo-16k", + model: Optional[str] = _get_default_model(), grading_context_columns: Optional[List[str]] = [], # noqa: B006 parameters: Optional[Dict[str, Any]] = None, aggregations: Optional[List[str]] = ["mean", "variance", "p90"], # noqa: B006 @@ -107,7 +107,7 @@ def make_genai_metric( :param version: (Optional) Version of the metric. Currently supported versions are: v1. :param model: (Optional) Model uri of the of an openai or gateway judge model in the format of "openai:/gpt-4" or "gateway:/my-route". Defaults to - "openai:/gpt-3.5-turbo-16k". Your use of a third party LLM service (e.g., OpenAI) for + "openai:/gpt-4". Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. :param grading_context_columns: (Optional) grading_context_columns required to compute the metric. These grading_context_columns are used by the LLM as a judge as additional @@ -178,7 +178,7 @@ def make_genai_metric( ), examples=[example], version="v1", - model="openai:/gpt-3.5-turbo-16k", + model="openai:/gpt-4", grading_context_columns=["ground_truth"], parameters={"temperature": 0.0}, aggregations=["mean", "variance", "p90"], diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index ccee4e0fb75fc..3317d3a1079c8 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -29,7 +29,7 @@ def answer_similarity( An MlflowException will be raised if the specified version for this metric does not exist. :param model: (Optional) The model that will be used to evaluate this metric. Defaults to - gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the answer similarity metric to use. Defaults to the latest version. @@ -100,7 +100,7 @@ def strict_correctness( An MlflowException will be raised if the specified version for this metric does not exist. :param model: (Optional) The model that will be used to evaluate this metric. Defaults to - gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the strict correctness metric to use. Defaults to the latest version. @@ -167,7 +167,7 @@ def relevance( An MlflowException will be raised if the specified version for this metric does not exist. :param model: (Optional) The model that will be used to evaluate this metric. Defaults to - gpt-3.5-turbo-16k. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. :param metric_version: (Optional) The version of the relevance metric to use. Defaults to the latest version. 
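
The hunks above only change which judge model the GenAI metric factories default to; their calling convention stays the same. As a rough usage sketch (assuming an OpenAI API key is set in the environment, MLflow's optional evaluation dependencies are installed, and an illustrative DataFrame whose column names are not taken from this patch), one of these factories can be passed to mlflow.evaluate() as an extra metric:

    import pandas as pd

    import mlflow
    from mlflow.metrics.genai.metric_definitions import answer_similarity

    # Static evaluation: model outputs are already materialized in the "outputs"
    # column, so no model object is passed to mlflow.evaluate().
    eval_data = pd.DataFrame(
        {
            "inputs": ["What is MLflow?"],
            "ground_truth": [
                "MLflow is an open-source platform for managing the end-to-end ML lifecycle."
            ],
            "outputs": [
                "MLflow is an open-source MLOps platform developed by Databricks."
            ],
        }
    )

    # Picks up the new default judge model (gpt-4) unless another model URI is given.
    similarity = answer_similarity(model="openai:/gpt-4")

    with mlflow.start_run():
        results = mlflow.evaluate(
            data=eval_data,
            targets="ground_truth",
            predictions="outputs",
            model_type="question-answering",
            extra_metrics=[similarity],
        )
        print(results.metrics)

Here targets="ground_truth" supplies the ground-truth column that answer_similarity grades against; the column names and example strings are placeholders for whatever the evaluation dataset actually contains.
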
diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 64349b2d71b97..255d5d50819f1 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -9,7 +9,7 @@ ) # TODO: Update the default_mode and default_parameters to the correct values post experimentation -default_model = "openai:/gpt-3.5-turbo-16k" +default_model = "openai:/gpt-4" default_parameters = { "temperature": 0.0, "max_tokens": 200, diff --git a/mlflow/metrics/genai/utils.py b/mlflow/metrics/genai/utils.py index 94966566685c0..3c9f43f1740de 100644 --- a/mlflow/metrics/genai/utils.py +++ b/mlflow/metrics/genai/utils.py @@ -1,2 +1,6 @@ def _get_latest_metric_version(): return "v1" + + +def _get_default_model(): + return "openai:/gpt-4" diff --git a/tests/metrics/genai/prompts/test_v1.py b/tests/metrics/genai/prompts/test_v1.py index 68f5ff7738c60..5356cec852264 100644 --- a/tests/metrics/genai/prompts/test_v1.py +++ b/tests/metrics/genai/prompts/test_v1.py @@ -150,7 +150,7 @@ def test_evaluation_model_output(): """, ).to_dict() - assert model2["model"] == "openai:/gpt-3.5-turbo-16k" + assert model2["model"] == "openai:/gpt-4" assert model2["parameters"] == { "temperature": 0.0, "max_tokens": 200, diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 98a50c6c0df8b..5ed6f71556294 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -665,7 +665,7 @@ def test_strict_correctness_metric(): pd.Series([mlflow_ground_truth]), ) assert mock_predict_function.call_count == 1 - assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo-16k" + assert mock_predict_function.call_args[0][0] == "openai:/gpt-4" assert mock_predict_function.call_args[0][1] == { "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " "sent to a machine\nlearning model, and you will be given an output that the model " From 80073e8fa545057e1165d8c802fe2798e3fb2d71 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Mon, 23 Oct 2023 13:19:15 +0900 Subject: [PATCH 050/101] Add a warning for model version aliases in `MlflowClient. search_model_versions` (#10060) Signed-off-by: harupy --- mlflow/tracking/client.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mlflow/tracking/client.py b/mlflow/tracking/client.py index c278bbc916618..f59ec61ff199f 100644 --- a/mlflow/tracking/client.py +++ b/mlflow/tracking/client.py @@ -3018,6 +3018,10 @@ def search_model_versions( """ Search for model versions in backend that satisfy the filter criteria. + .. warning: + + The model version search results may not have aliases populated for performance reasons. + :param filter_string: Filter query string (e.g., ``"name = 'a_model_name' and tag.key = 'value1'"``), defaults to searching for all model versions. 
The following identifiers, comparators, From 9abcca4cb54f6cf9de74c593d3fbb5b5fe02ce43 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Mon, 23 Oct 2023 13:27:05 +0900 Subject: [PATCH 051/101] Split more CI jobs (#10064) Signed-off-by: harupy <17039389+harupy@users.noreply.github.com> Signed-off-by: harupy --- .github/workflows/master.yml | 31 +++++++++++++++---- .../tracking/request_header/test_registry.py | 6 ++++ tests/tracking/test_client.py | 6 ++++ 3 files changed, 37 insertions(+), 6 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 6b59eeb79afb2..dfe29cbdc4fc3 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -88,6 +88,11 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: ubuntu-latest timeout-minutes: 120 + strategy: + matrix: + group: [1, 2] + include: + - splits: 2 steps: - uses: actions/checkout@v3 with: @@ -114,8 +119,9 @@ jobs: run: | . ~/.venv/bin/activate source dev/setup-ssh.sh - pytest tests --quiet --requires-ssh --ignore-flavors \ - --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate + pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} --quiet --requires-ssh \ + --ignore-flavors --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate \ + tests database: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false @@ -243,9 +249,9 @@ jobs: timeout-minutes: 120 strategy: matrix: - group: [1, 2, 3] + group: [1, 2] include: - - splits: 3 + - splits: 2 steps: - uses: actions/checkout@v3 with: @@ -293,6 +299,11 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: ubuntu-latest timeout-minutes: 120 + strategy: + matrix: + group: [1, 2] + include: + - splits: 2 steps: - uses: actions/checkout@v3 with: @@ -309,7 +320,8 @@ jobs: - uses: ./.github/actions/pipdeptree - name: Run tests run: | - pytest --durations=30 tests/pyfunc --ignore tests/pyfunc/test_spark_connect.py + pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} --durations=30 \ + tests/pyfunc --ignore tests/pyfunc/test_spark_connect.py # test_spark_connect.py fails if it's run with ohter tests, so run it separately. pytest tests/pyfunc/test_spark_connect.py @@ -337,6 +349,11 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: windows-latest timeout-minutes: 120 + strategy: + matrix: + group: [1, 2] + include: + - splits: 2 steps: - uses: actions/checkout@v3 with: @@ -375,7 +392,9 @@ jobs: export HADOOP_HOME=/tmp/winutils/hadoop-3.2.2 export PATH=$PATH:$HADOOP_HOME/bin # Run Windows tests - pytest --ignore-flavors --ignore=tests/projects --ignore=tests/examples tests --ignore=tests/recipes --ignore=tests/evaluate + pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} \ + --ignore-flavors --ignore=tests/projects --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate \ + tests # MLeap is incompatible on Windows with PySpark3.4 release. # Reinstate tests when MLeap has released a fix. 
[ML-30491] # pytest tests/mleap diff --git a/tests/tracking/request_header/test_registry.py b/tests/tracking/request_header/test_registry.py index 6eba4f701e207..f2295b5caf528 100644 --- a/tests/tracking/request_header/test_registry.py +++ b/tests/tracking/request_header/test_registry.py @@ -15,6 +15,12 @@ # pylint: disable=unused-argument +@pytest.fixture(autouse=True) +def reload_registry(): + yield + reload(mlflow.tracking.request_header.registry) + + def test_request_header_context_provider_registry_register(): provider_class = mock.Mock() diff --git a/tests/tracking/test_client.py b/tests/tracking/test_client.py index 571142b55d821..540fe67390f13 100644 --- a/tests/tracking/test_client.py +++ b/tests/tracking/test_client.py @@ -30,6 +30,12 @@ ) +@pytest.fixture(autouse=True) +def reset_registry_uri(): + yield + set_registry_uri(None) + + @pytest.fixture def mock_store(): with mock.patch("mlflow.tracking._tracking_service.utils._get_store") as mock_get_store: From 2cc010eeed8ff7b8bc56c174512a1322fd2d3088 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Sun, 22 Oct 2023 22:32:54 -0700 Subject: [PATCH 052/101] Simplify parameters usage for eval_fn (custom metrics) (#10057) Signed-off-by: Ann Zhang --- mlflow/models/evaluation/default_evaluator.py | 7 +++--- tests/evaluate/test_default_evaluator.py | 24 +++++++++---------- 2 files changed, 14 insertions(+), 17 deletions(-) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 0ace3417b9b4a..607dae155ebf5 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1152,12 +1152,11 @@ def _get_args_for_metrics(self, extra_metric, eval_df): parameters = inspect.signature(extra_metric.eval_fn).parameters eval_fn_args = [] params_not_found = [] + # eval_fn has parameters (eval_df, builtin_metrics) for backwards compatibility if len(parameters) == 2: eval_fn_args.append(eval_df_copy) - if "metrics" in parameters.keys(): - eval_fn_args.append(copy.deepcopy(self.metrics_values)) - else: - eval_fn_args.append(copy.deepcopy(self.metrics)) + eval_fn_args.append(copy.deepcopy(self.metrics)) + # eval_fn can have parameters like (predictions, targets, metrics, random_col) else: for param_name, param in parameters.items(): column = self.col_mapping.get(param_name, param_name) diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index 5e654f852ce5a..b67a6147179a7 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -1349,14 +1349,12 @@ def old_fn(eval_df, builtin_metrics): assert res_metric.justifications is None assert res_metric.aggregate_results["old_fn"] == builtin_metrics["mean_absolute_error"] * 1.5 - new_eval_fn_args = [eval_df, metrics] + new_eval_fn_args = [eval_df, None, metrics] - def new_fn_with_type_hint(eval_df, metrics: Dict[str, MetricValue]): + def new_fn(predictions, targets=None, metrics=None): return metrics["mean_absolute_error"].aggregate_results["mean_absolute_error"] * 1.5 - res_metric = _evaluate_extra_metric( - _CustomMetric(new_fn_with_type_hint, "new_fn", 0), new_eval_fn_args - ) + res_metric = _evaluate_extra_metric(_CustomMetric(new_fn, "new_fn", 0), new_eval_fn_args) assert res_metric.scores is None assert res_metric.justifications is None assert res_metric.aggregate_results["new_fn"] == builtin_metrics["mean_absolute_error"] * 1.5 @@ -1504,10 +1502,10 @@ def test_evaluate_custom_metric_success(): eval_df["target"], 
eval_df["prediction"], sample_weights=None ) - def example_count_times_1_point_5(eval_df, metrics: Dict[str, MetricValue]): + def example_count_times_1_point_5(predictions, targets=None, metrics=None): return MetricValue( - scores=[score * 1.5 for score in eval_df["prediction"].tolist()], - justifications=["justification"] * len(eval_df["prediction"]), + scores=[score * 1.5 for score in predictions.tolist()], + justifications=["justification"] * len(predictions), aggregate_results={ "example_count_times_1_point_5": metrics["example_count"].aggregate_results[ "example_count" @@ -1516,7 +1514,7 @@ def example_count_times_1_point_5(eval_df, metrics: Dict[str, MetricValue]): }, ) - eval_fn_args = [eval_df, _get_aggregate_metrics_values(builtin_metrics)] + eval_fn_args = [eval_df["prediction"], None, _get_aggregate_metrics_values(builtin_metrics)] res_metric = _evaluate_extra_metric( _CustomMetric(example_count_times_1_point_5, "", 0), eval_fn_args ) @@ -1593,7 +1591,7 @@ def example_custom_artifact_2(_, __, ___): def test_custom_metric_mixed(binary_logistic_regressor_model_uri, breast_cancer_dataset): - def true_count(_eval_df, metrics: Dict[str, MetricValue]): + def true_count(predictions, targets=None, metrics=None): true_negatives = metrics["true_negatives"].aggregate_results["true_negatives"] true_positives = metrics["true_positives"].aggregate_results["true_positives"] return MetricValue(aggregate_results={"true_count": true_negatives + true_positives}) @@ -2602,7 +2600,7 @@ def test_evaluate_text_and_text_metrics(): assert set(results.metrics.keys()) == set(get_text_metrics_keys()) -def very_toxic(eval_df, metrics: Dict[str, MetricValue]): +def very_toxic(predictions, targets=None, metrics=None): new_scores = [1.0 if score > 0.9 else 0.0 for score in metrics["toxicity/v1"].scores] return MetricValue( scores=new_scores, @@ -2611,8 +2609,8 @@ def very_toxic(eval_df, metrics: Dict[str, MetricValue]): ) -def per_row_metric(eval_df, metrics: Dict[str, MetricValue]): - return MetricValue(scores=[1] * len(eval_df["prediction"])) +def per_row_metric(predictions, targets=None, metrics=None): + return MetricValue(scores=[1] * len(predictions)) def test_evaluate_text_custom_metrics(): From 3168e5b0827437f7773d30c6429abae805167d8a Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Mon, 23 Oct 2023 16:17:42 +0900 Subject: [PATCH 053/101] Fix `score_model_as_udf` (#10065) Signed-off-by: harupy --- tests/pyfunc/test_spark.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/pyfunc/test_spark.py b/tests/pyfunc/test_spark.py index 53affa8c6d469..ea7ce0ef43140 100644 --- a/tests/pyfunc/test_spark.py +++ b/tests/pyfunc/test_spark.py @@ -57,8 +57,7 @@ types = [np.int32, int, str, np.float32, np.double, bool] -def score_model_as_udf(model_uri, pandas_df, result_type="double"): - spark = pyspark.sql.SparkSession.getActiveSession() +def score_spark(spark, model_uri, pandas_df, result_type="double"): spark_df = spark.createDataFrame(pandas_df).coalesce(1) pyfunc_udf = spark_udf( spark=spark, model_uri=model_uri, result_type=result_type, env_manager="local" @@ -67,6 +66,16 @@ def score_model_as_udf(model_uri, pandas_df, result_type="double"): return [x["prediction"] for x in new_df.collect()] +def score_model_as_udf(model_uri, pandas_df, result_type="double"): + if spark := pyspark.sql.SparkSession.getActiveSession(): + # Reuse the active SparkSession, don't kill it after use + return score_spark(spark, model_uri, pandas_df, result_type) + + # Create a new SparkSession, 
kill it after use + with get_spark_session(pyspark.SparkConf()) as spark: + return score_spark(spark, model_uri, pandas_df, result_type) + + class ConstantPyfuncWrapper: @staticmethod def predict(model_input): From 15cd8327e80f1ff012dba0d235336a96de87abce Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Mon, 23 Oct 2023 01:01:43 -0700 Subject: [PATCH 054/101] Introduce answer correctness and remove strict correctness (#10053) Signed-off-by: Ann Zhang --- docs/source/python_api/mlflow.metrics.rst | 4 +- mlflow/metrics/__init__.py | 4 +- mlflow/metrics/genai/metric_definitions.py | 54 +++++++++--------- mlflow/metrics/genai/prompts/v1.py | 65 ++++++++++++++-------- tests/metrics/genai/test_genai_metrics.py | 30 +++++----- 5 files changed, 85 insertions(+), 72 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 9f8a9acbdf414..350b6fdbc0881 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -102,7 +102,7 @@ We provide the following pre-canned "intelligent" :py:class:`EvaluationMetric EvaluationMetric: "binary_f1_score", "answer_similarity", "relevance", - "strict_correctness", + "answer_correctness", "token_count", "latency", ] diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index 3317d3a1079c8..862d342334fc2 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -20,11 +20,11 @@ def answer_similarity( """ This function will create a genai metric used to evaluate the answer similarity of an LLM using the model provided. Answer similarity will be assessed by the semantic similarity of the - output to the ``ground_truth``, which should be specified in the ``target`` column. + output to the ``ground_truth``, which should be specified in the ``targets`` column. - The ``target`` eval_arg must be provided as part of the input dataset or output - predictions. This can be mapped to a column of a different name using the a ``col_mapping`` - in the ``evaluator_config``. + The ``targets`` eval_arg must be provided as part of the input dataset or output + predictions. This can be mapped to a column of a different name using ``col_mapping`` + in the ``evaluator_config`` parameter, or using the ``targets`` parameter in mlflow.evaluate(). An MlflowException will be raised if the specified version for this metric does not exist. @@ -78,34 +78,30 @@ def answer_similarity( @experimental -def strict_correctness( +def answer_correctness( model: Optional[str] = None, metric_version: Optional[str] = None, examples: Optional[List[EvaluationExample]] = None, judge_request_timeout=60, ) -> EvaluationMetric: """ - This function will create a genai metric used to evaluate the strict correctness of an LLM - using the model provided. Strict correctness should be used in cases where correctness is - binary, and the source of truth is provided in the ``ground_truth``. Outputs will be - given either the highest or lowest score depending on if they are consistent with the - ``ground_truth``. When dealing with inputs that may have multiple correct outputs, varying - degrees of correctness, or when considering other factors such as the comprehensiveness of - the output, it is more appropriate to use the correctness metric instead. - - The ``ground_truth`` eval_arg must be provided as part of the input dataset or output - predictions. 
This can be mapped to a column of a different name using the a ``col_mapping`` - in the ``evaluator_config``. + This function will create a genai metric used to evaluate the answer correctness of an LLM + using the model provided. Answer correctness will be assessed by the accuracy of the provided + output based on the ``ground_truth``, which should be specified in the ``targets`` column. + + The ``targets`` eval_arg must be provided as part of the input dataset or output + predictions. This can be mapped to a column of a different name using ``col_mapping`` + in the ``evaluator_config`` parameter, or using the ``targets`` parameter in mlflow.evaluate(). An MlflowException will be raised if the specified version for this metric does not exist. :param model: (Optional) The model that will be used to evaluate this metric. Defaults to gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. - :param metric_version: (Optional) The version of the strict correctness metric to use. + :param metric_version: (Optional) The version of the answer correctness metric to use. Defaults to the latest version. :param examples: (Optional) Provide a list of examples to help the judge model evaluate the - strict correctness. It is highly recommended to add examples to be used as a reference to + answer correctness. It is highly recommended to add examples to be used as a reference to evaluate the new results. :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. Defaults to 60 seconds. @@ -113,35 +109,35 @@ def strict_correctness( """ if metric_version is None: metric_version = _get_latest_metric_version() - class_name = f"mlflow.metrics.genai.prompts.{metric_version}.StrictCorrectnessMetric" + class_name = f"mlflow.metrics.genai.prompts.{metric_version}.AnswerCorrectnessMetric" try: - strict_correctness_class_module = _get_class_from_string(class_name) + answer_correctness_class_module = _get_class_from_string(class_name) except ModuleNotFoundError: raise MlflowException( - f"Failed to find strict correctness metric for version {metric_version}." + f"Failed to find answer correctness metric for version {metric_version}." f"Please check the version", error_code=INVALID_PARAMETER_VALUE, ) from None except Exception as e: raise MlflowException( - f"Failed to construct strict correctness metric {metric_version}. Error: {e!r}", + f"Failed to construct answer correctness metric {metric_version}. 
Error: {e!r}", error_code=INTERNAL_ERROR, ) from None if examples is None: - examples = strict_correctness_class_module.default_examples + examples = answer_correctness_class_module.default_examples if model is None: - model = strict_correctness_class_module.default_model + model = answer_correctness_class_module.default_model return make_genai_metric( - name="strict_correctness", - definition=strict_correctness_class_module.definition, - grading_prompt=strict_correctness_class_module.grading_prompt, + name="answer_correctness", + definition=answer_correctness_class_module.definition, + grading_prompt=answer_correctness_class_module.grading_prompt, examples=examples, version=metric_version, model=model, - grading_context_columns=strict_correctness_class_module.grading_context_columns, - parameters=strict_correctness_class_module.parameters, + grading_context_columns=answer_correctness_class_module.grading_context_columns, + parameters=answer_correctness_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, judge_request_timeout=judge_request_timeout, diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 255d5d50819f1..45a28e44424fc 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -221,32 +221,45 @@ class RelevanceMetric: @dataclass -class StrictCorrectnessMetric: +class AnswerCorrectnessMetric: definition = ( - "When a question demands a specific value, term, or description (e.g., math questions or " - "fact-checking), correctness is binary. Strict correctness of the output is assessed on " - "whether it aligns exactly with the ground truth. Scores are assigned to be 0 or 1." + "Answer correctness is evaluated on the accuracy of the provided output based on the " + "provided targets, which is the ground truth. Scores can be assigned based on the degree " + "of semantic similarity and factual correctness of the provided output to the provided " + "targets, where a higher score indicates higher degree of accuracy." ) grading_prompt = ( - "Strict Correctness: Below are the details for different scores:" - "- Score 0: the output is completely incorrect, doesn't mention anything about the " - "question or is completely contrary to the ground truth." - "- Score 1: the output answers the question correctly as provided in the ground truth." + "Answer Correctness: Below are the details for different scores:\n" + "- Score 1: the output is completely incorrect. It is completely different from or " + "contradicts the provided targets.\n" + "- Score 2: the output demonstrates some degree of semantic similarity and includes " + "partially correct information. However, the output still has significant discrepancies " + "with the provided targets or inaccuracies.\n" + "- Score 3: the output addresses a couple of aspects of the input accurately, aligning " + "with the provided targets. However, there are still omissions or minor inaccuracies.\n" + "- Score 4: the output is mostly correct. It provides mostly accurate information, but " + "there may be one or more minor omissions or inaccuracies.\n" + "- Score 5: the output is correct. It demonstrates a high degree of accuracy and " + "semantic similarity to the targets." ) grading_context_columns = ["targets"] parameters = default_parameters default_model = default_model - example_score_0 = EvaluationExample( - input="Is MLflow open-source?", - output="No, MLflow is not open-source.", - score=0, - justification="The output is incorrect. 
It states that MLflow is not open-source, which " - "contradicts the provided context, where it is explicitly mentioned that MLflow is an " - "open-source platform. This directly opposes the ground truth, resulting in a score of 0 " - "for strict correctness.", + example_score_2 = EvaluationExample( + input="How is MLflow related to Databricks?", + output="Databricks is a data engineering and analytics platform designed to help " + "organizations process and analyze large amounts of data. Databricks is a company " + "specializing in big data and machine learning solutions.", + score=2, + justification="The output provided by the model does demonstrate some degree of semantic " + "similarity to the targets, as it correctly identifies Databricks as a company " + "specializing in big data and machine learning solutions. However, it fails to address " + "the main point of the input question, which is the relationship between MLflow and " + "Databricks. The output does not mention MLflow at all, which is a significant discrepancy " + "with the provided targets. Therefore, the model's answer_correctness score is 2.", grading_context={ "targets": "MLflow is an open-source platform for managing the end-to-end machine " "learning (ML) lifecycle. It was developed by Databricks, a company that specializes " @@ -256,13 +269,17 @@ class StrictCorrectnessMetric: }, ) - example_score_1 = EvaluationExample( - input="Is MLflow open-source?", - output="MLflow is open-source, which means it's freely available for anyone to use.", - score=1, - justification="The output correctly states that MLflow is open-source, aligning perfectly " - "with the provided context. It accurately reflects the ground truth information, earning " - "a score of 1 for strict correctness.", + example_score_4 = EvaluationExample( + input="How is MLflow related to Databricks?", + output="MLflow is a product created by Databricks to enhance the efficiency of machine " + "learning processes.", + score=4, + justification="The output provided by the model is mostly correct. It correctly identifies " + "that MLflow is a product created by Databricks. However, it does not mention that MLflow " + "is an open-source platform for managing the end-to-end machine learning lifecycle, which " + "is a significant part of its function. Therefore, while the output is mostly accurate, " + "it has a minor omission, which is why it gets a score of 4 according to the grading " + "rubric.", grading_context={ "targets": "MLflow is an open-source platform for managing the end-to-end machine " "learning (ML) lifecycle. 
It was developed by Databricks, a company that specializes " @@ -272,4 +289,4 @@ class StrictCorrectnessMetric: }, ) - default_examples = [example_score_0, example_score_1] + default_examples = [example_score_2, example_score_4] diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 5ed6f71556294..86e286c34ad3f 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -15,14 +15,14 @@ make_genai_metric, ) from mlflow.metrics.genai.metric_definitions import ( + answer_correctness, answer_similarity, relevance, - strict_correctness, ) from mlflow.metrics.genai.prompts.v1 import ( + AnswerCorrectnessMetric, AnswerSimilarityMetric, RelevanceMetric, - StrictCorrectnessMetric, ) openai_justification1 = ( @@ -648,17 +648,17 @@ def test_relevance_metric(): ) -def test_strict_correctness_metric(): - strict_correctness_metric = strict_correctness() +def test_answer_correctness_metric(): + answer_correctness_metric = answer_correctness() input = "What is MLflow?" - examples = "\n".join([str(example) for example in StrictCorrectnessMetric.default_examples]) + examples = "\n".join([str(example) for example in AnswerCorrectnessMetric.default_examples]) with mock.patch.object( model_utils, "score_model_on_payload", return_value=properly_formatted_openai_response1, ) as mock_predict_function: - metric_value = strict_correctness_metric.eval_fn( + metric_value = answer_correctness_metric.eval_fn( pd.Series([mlflow_prediction]), {}, pd.Series([input]), @@ -671,8 +671,8 @@ def test_strict_correctness_metric(): "sent to a machine\nlearning model, and you will be given an output that the model " "produced. You\nmay also be given additional information that was used by the model " "to generate the output.\n\nYour task is to determine a numerical score called " - "strict_correctness based on the input and output.\nA definition of " - "strict_correctness and a grading rubric are provided below.\nYou must use the " + "answer_correctness based on the input and output.\nA definition of " + "answer_correctness and a grading rubric are provided below.\nYou must use the " "grading rubric to determine your score. You must also justify your score." "\n\nExamples could be included below for reference. 
Make sure to use them as " "references and to\nunderstand them before completing the task.\n" @@ -680,15 +680,15 @@ def test_strict_correctness_metric(): f"\nOutput:\n{mlflow_prediction}\n" "\nAdditional information used by the model:\nkey: targets\nvalue:\n" f"{mlflow_ground_truth}\n" - f"\nMetric definition:\n{StrictCorrectnessMetric.definition}\n" - f"\nGrading rubric:\n{StrictCorrectnessMetric.grading_prompt}\n" + f"\nMetric definition:\n{AnswerCorrectnessMetric.definition}\n" + f"\nGrading rubric:\n{AnswerCorrectnessMetric.grading_prompt}\n" "\nExamples:\n" f"{examples}\n" "\nYou must return the following fields in your response one below the other:\nscore: " - "Your numerical score for the model's strict_correctness based on the " + "Your numerical score for the model's answer_correctness based on the " "rubric\njustification: Your step-by-step reasoning about the model's " - "strict_correctness score\n ", - **StrictCorrectnessMetric.parameters, + "answer_correctness score\n ", + **AnswerCorrectnessMetric.parameters, } assert metric_value.scores == [3] @@ -702,9 +702,9 @@ def test_strict_correctness_metric(): with pytest.raises( MlflowException, - match="Failed to find strict correctness metric for version non-existent-version", + match="Failed to find answer correctness metric for version non-existent-version", ): - strict_correctness_metric = strict_correctness(metric_version="non-existent-version") + answer_correctness(metric_version="non-existent-version") def test_make_genai_metric_metric_details(): From 3f74399a1839c64302990cf1a269fadb4f75e5d9 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Mon, 23 Oct 2023 02:17:54 -0700 Subject: [PATCH 055/101] Replace relevance with faithfulness (#10070) Signed-off-by: Ann Zhang --- docs/source/python_api/mlflow.metrics.rst | 4 +- mlflow/metrics/__init__.py | 4 +- mlflow/metrics/genai/metric_definitions.py | 44 +++++++------- mlflow/metrics/genai/prompts/v1.py | 70 ++++++++++++---------- tests/metrics/genai/test_genai_metrics.py | 28 ++++----- 5 files changed, 78 insertions(+), 72 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 350b6fdbc0881..5930368faa7ab 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -104,7 +104,7 @@ We provide the following pre-canned "intelligent" :py:class:`EvaluationMetric ` using the :py:func:`make_genai_metric ` factory function. @@ -118,4 +118,4 @@ When using LLM based :py:class:`EvaluationMetric EvaluationMetric: "binary_precision", "binary_f1_score", "answer_similarity", - "relevance", + "faithfulness", "answer_correctness", "token_count", "latency", diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index 862d342334fc2..2977adbeaa12d 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -145,66 +145,64 @@ def answer_correctness( @experimental -def relevance( +def faithfulness( model: Optional[str] = None, - metric_version: Optional[str] = None, + metric_version: Optional[str] = _get_latest_metric_version(), examples: Optional[List[EvaluationExample]] = None, judge_request_timeout=60, ) -> EvaluationMetric: """ - This function will create a genai metric used to evaluate the relevance of an LLM using the - model provided. Relevance will be assessed by the appropriateness, significance, and - applicability of the output with respect to the ``input`` and ``context``. 
+ This function will create a genai metric used to evaluate the faithfullness of an LLM using the + model provided. Faithfulness will be assessed based on how factually consistent the output + is to the ``context``. - The ``input`` and ``context`` args must be provided as part of the input dataset or output - predictions. This can be mapped to a column of a different name using the a ``col_mapping`` - in the ``evaluator_config``. + The ``context`` eval_arg must be provided as part of the input dataset or output + predictions. This can be mapped to a column of a different name using ``col_mapping`` + in the ``evaluator_config`` parameter. An MlflowException will be raised if the specified version for this metric does not exist. :param model: (Optional) The model that will be used to evaluate this metric. Defaults to gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. - :param metric_version: (Optional) The version of the relevance metric to use. + :param metric_version: (Optional) The version of the faithfulness metric to use. Defaults to the latest version. :param examples: (Optional) Provide a list of examples to help the judge model evaluate the - relevance. It is highly recommended to add examples to be used as a reference to evaluate + faithfulness. It is highly recommended to add examples to be used as a reference to evaluate the new results. :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. Defaults to 60 seconds. :return: A metric object """ - if metric_version is None: - metric_version = _get_latest_metric_version() - class_name = f"mlflow.metrics.genai.prompts.{metric_version}.RelevanceMetric" + class_name = f"mlflow.metrics.genai.prompts.{metric_version}.FaithfulnessMetric" try: - relevance_class_module = _get_class_from_string(class_name) + faithfulness_class_module = _get_class_from_string(class_name) except ModuleNotFoundError: raise MlflowException( - f"Failed to find relevance metric for version {metric_version}." + f"Failed to find faithfulness metric for version {metric_version}." f" Please check the version", error_code=INVALID_PARAMETER_VALUE, ) from None except Exception as e: raise MlflowException( - f"Failed to construct relevance metric {metric_version}. Error: {e!r}", + f"Failed to construct faithfulness metric {metric_version}. 
Error: {e!r}", error_code=INTERNAL_ERROR, ) from None if examples is None: - examples = relevance_class_module.default_examples + examples = faithfulness_class_module.default_examples if model is None: - model = relevance_class_module.default_model + model = faithfulness_class_module.default_model return make_genai_metric( - name="relevance", - definition=relevance_class_module.definition, - grading_prompt=relevance_class_module.grading_prompt, + name="faithfulness", + definition=faithfulness_class_module.definition, + grading_prompt=faithfulness_class_module.grading_prompt, examples=examples, version=metric_version, model=model, - grading_context_columns=relevance_class_module.grading_context_columns, - parameters=relevance_class_module.parameters, + grading_context_columns=faithfulness_class_module.grading_context_columns, + parameters=faithfulness_class_module.parameters, aggregations=["mean", "variance", "p90"], greater_is_better=True, judge_request_timeout=judge_request_timeout, diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 45a28e44424fc..3da6459812a35 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -154,23 +154,28 @@ class AnswerSimilarityMetric: @dataclass -class RelevanceMetric: +class FaithfulnessMetric: definition = ( - "Relevance encompasses the appropriateness, significance, and applicability of the output " - "with respect to the input and context. Scores should range from 1 to 5 and should reflect " - "the extent to which the output directly addresses the question provided in the input, " - "given the provided context." + "Faithfulness is only evaluated with the provided output and provided context, please " + "ignore the provided input entirely when scoring faithfulness. Faithfulness assesses " + "how much of the provided output is factually consistent with the provided context. A " + "higher score indicates that a higher proportion of claims present in the output can be " + "derived from the provided context. Faithfulness does not consider how much extra " + "information from the context is not present in the output." ) grading_prompt = ( - "Relevance: Below are the details for different scores:" - "- Score 1: the output doesn't mention anything about the question or is completely " - "irrelevant to the provided context." - "- Score 2: the output provides some relevance to the question and is somehow related to " - "the provided context." - "- Score 3: the output mostly answers the question and is consistent with the provided " - "context." - "- Score 5: the output answers the question comprehensively using the provided context." + "Faithfulness: Below are the details for different scores:\n" + "- Score 1: None of the claims in the output can be inferred from the provided context.\n" + "- Score 2: Some of the claims in the output can be inferred from the provided context, " + "but the majority of the output is missing from, inconsistent with, or contradictory to " + "the provided context.\n" + "- Score 3: Half or more of the claims in the output can be inferred from the provided " + "context.\n" + "- Score 4: Most of the claims in the output can be inferred from the provided context, " + "with very little information that is not directly supported by the provided context.\n" + "- Score 5: All of the claims in the output are directly supported by the provided " + "context, demonstrating high faithfulness to the provided context." 
) grading_context_columns = ["context"] @@ -179,15 +184,18 @@ class RelevanceMetric: example_score_2 = EvaluationExample( input="How is MLflow related to Databricks?", - output="Databricks is a data engineering and analytics platform designed to help " - "organizations process and analyze large amounts of data. Databricks is a company " - "specializing in big data and machine learning solutions.", + output="Databricks is a company that specializes in big data and machine learning " + "solutions. MLflow has nothing to do with Databricks. MLflow is an open-source platform " + "for managing the end-to-end machine learning (ML) lifecycle.", score=2, - justification="The output provides relevant information about Databricks, mentioning it as " - "a company specializing in big data and machine learning solutions. However, it doesn't " - "directly address how MLflow is related to Databricks, which is the specific question " - "asked in the input. Therefore, the output is only somewhat related to the provided " - "context.", + justification='The output claims that "MLflow has nothing to do with Databricks" which is ' + 'contradictory to the provided context that states "It was developed by Databricks". This ' + 'is a major inconsistency. However, the output correctly identifies that "MLflow is an ' + 'open-source platform for managing the end-to-end machine learning (ML) lifecycle" and ' + '"Databricks is a company that specializes in big data and machine learning solutions", ' + "which are both supported by the context. Therefore, some of the claims in the output can " + "be inferred from the provided context, but the majority of the output is inconsistent " + "with the provided context, leading to a faithfulness score of 2.", grading_context={ "context": "MLflow is an open-source platform for managing the end-to-end machine " "learning (ML) lifecycle. It was developed by Databricks, a company that specializes " @@ -197,16 +205,16 @@ class RelevanceMetric: }, ) - example_score_4 = EvaluationExample( + example_score_5 = EvaluationExample( input="How is MLflow related to Databricks?", - output="MLflow is a product created by Databricks to enhance the efficiency of machine " - "learning processes.", - score=4, - justification="The output provides a relevant and accurate statement about the " - "relationship between MLflow and Databricks. While it doesn't provide extensive detail, " - "it still offers a substantial and meaningful response. To achieve a score of 5, the " - "response could be further improved by providing additional context or details about" - "how MLflow specifically functions within the Databricks ecosystem.", + output="Databricks is a company that specializes in big data and machine learning " + "solutions.", + score=5, + justification='The output states that "Databricks is a company that specializes in big data' + ' and machine learning solutions." This claim is directly supported by the context, which ' + 'states "It was developed by Databricks, a company that specializes in big data and ' + 'machine learning solutions." Therefore, the faithfulness score is 5 as all the claims in ' + 'the output are directly supported by the provided context."', grading_context={ "context": "MLflow is an open-source platform for managing the end-to-end " "machine learning (ML) lifecycle. 
It was developed by Databricks, a company " @@ -217,7 +225,7 @@ class RelevanceMetric: }, ) - default_examples = [example_score_2, example_score_4] + default_examples = [example_score_2, example_score_5] @dataclass diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 86e286c34ad3f..148b0817d4caa 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -17,12 +17,12 @@ from mlflow.metrics.genai.metric_definitions import ( answer_correctness, answer_similarity, - relevance, + faithfulness, ) from mlflow.metrics.genai.prompts.v1 import ( AnswerCorrectnessMetric, AnswerSimilarityMetric, - RelevanceMetric, + FaithfulnessMetric, ) openai_justification1 = ( @@ -588,8 +588,8 @@ def test_correctness_metric(): ) -def test_relevance_metric(): - relevance_metric = relevance(model="gateway:/gpt-3.5-turbo", examples=[]) +def test_faithfulness_metric(): + faithfulness_metric = faithfulness(model="gateway:/gpt-3.5-turbo", examples=[]) input = "What is MLflow?" with mock.patch.object( @@ -597,7 +597,7 @@ def test_relevance_metric(): "score_model_on_payload", return_value=properly_formatted_openai_response1, ) as mock_predict_function: - metric_value = relevance_metric.eval_fn( + metric_value = faithfulness_metric.eval_fn( pd.Series([mlflow_prediction]), {}, pd.Series([input]), @@ -610,8 +610,8 @@ def test_relevance_metric(): "sent to a machine\nlearning model, and you will be given an output that the model " "produced. You\nmay also be given additional information that was used by the model " "to generate the output.\n\nYour task is to determine a numerical score called " - "relevance based on the input and output.\nA definition of " - "relevance and a grading rubric are provided below.\nYou must use the " + "faithfulness based on the input and output.\nA definition of " + "faithfulness and a grading rubric are provided below.\nYou must use the " "grading rubric to determine your score. You must also justify your score." "\n\nExamples could be included below for reference. 
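# --------------------------------------------------------------------------------
# Illustrative sketch, not part of this patch series: supplying a custom few-shot
# example to the faithfulness metric defined above instead of its default_examples.
# The strings are placeholders, the "gateway:/gpt-3.5-turbo" judge URI mirrors the
# value used in the tests and assumes such a gateway route exists, and the
# EvaluationExample import path assumes the class still lives in mlflow.metrics.base
# at this point in the series.
from mlflow.metrics.base import EvaluationExample
from mlflow.metrics.genai.metric_definitions import faithfulness

custom_example = EvaluationExample(
    input="What is MLflow?",
    output="MLflow is an open-source platform for the ML lifecycle.",
    score=5,
    justification="Every claim in the output is stated directly in the context.",
    grading_context={"context": "MLflow is an open-source platform for the ML lifecycle."},
)

faithfulness_metric = faithfulness(
    model="gateway:/gpt-3.5-turbo",
    examples=[custom_example],
)

# Examples are rendered into the judge prompt through their string form, and the
# resulting metric is scored via eval_fn exactly as in test_faithfulness_metric.
print(custom_example)
# --------------------------------------------------------------------------------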
Make sure to use them as " "references and to\nunderstand them before completing the task.\n" @@ -619,14 +619,14 @@ def test_relevance_metric(): f"\nOutput:\n{mlflow_prediction}\n" "\nAdditional information used by the model:\nkey: context\nvalue:\n" f"{mlflow_ground_truth}\n" - f"\nMetric definition:\n{RelevanceMetric.definition}\n" - f"\nGrading rubric:\n{RelevanceMetric.grading_prompt}\n" + f"\nMetric definition:\n{FaithfulnessMetric.definition}\n" + f"\nGrading rubric:\n{FaithfulnessMetric.grading_prompt}\n" "\n\n" "\nYou must return the following fields in your response one below the other:\nscore: " - "Your numerical score for the model's relevance based on the " + "Your numerical score for the model's faithfulness based on the " "rubric\njustification: Your step-by-step reasoning about the model's " - "relevance score\n ", - **RelevanceMetric.parameters, + "faithfulness score\n ", + **FaithfulnessMetric.parameters, } assert metric_value.scores == [3] @@ -639,9 +639,9 @@ def test_relevance_metric(): } with pytest.raises( - MlflowException, match="Failed to find relevance metric for version non-existent-version" + MlflowException, match="Failed to find faithfulness metric for version non-existent-version" ): - relevance_metric = relevance( + faithfulness_metric = faithfulness( model="gateway:/gpt-3.5-turbo", metric_version="non-existent-version", examples=[mlflow_example], From f2baa689472d0dc4c73b1c2f97ac296253ab4454 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Mon, 23 Oct 2023 22:08:01 +0900 Subject: [PATCH 056/101] Cache `.venv` on windows job (#10068) Signed-off-by: harupy Signed-off-by: Harutaka Kawamura --- .github/actions/cache-pip/action.yml | 2 +- .github/workflows/master.yml | 26 +++++++++++++--------- mlflow/utils/git_utils.py | 2 ++ tests/tracking/context/test_git_context.py | 9 +++----- 4 files changed, 21 insertions(+), 18 deletions(-) diff --git a/.github/actions/cache-pip/action.yml b/.github/actions/cache-pip/action.yml index 64dd364b47d3c..945550fb77db5 100644 --- a/.github/actions/cache-pip/action.yml +++ b/.github/actions/cache-pip/action.yml @@ -11,5 +11,5 @@ runs: env: SEGMENT_DOWNLOAD_TIMEOUT_MINS: 1 with: - path: ~/.venv + path: .venv key: ${{ steps.py-cache-key.outputs.key }} diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index dfe29cbdc4fc3..5035f8c5dde69 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -46,21 +46,21 @@ jobs: - uses: ./.github/actions/cache-pip - name: Install dependencies run: | - python -m venv ~/.venv - . ~/.venv/bin/activate + python -m venv .venv + source .venv/bin/activate source ./dev/install-common-deps.sh --ml pip install -r requirements/lint-requirements.txt - uses: ./.github/actions/pipdeptree - name: Install pre-commit hooks run: | - . ~/.venv/bin/activate + source .venv/bin/activate pre-commit install -t pre-commit -t prepare-commit-msg - name: Run pre-commit id: pre-commit env: IS_MAINTAINER: ${{ contains(fromJSON('["OWNER", "MEMBER", "COLLABORATOR"]'), github.event.pull_request.author_association )}} run: | - . ~/.venv/bin/activate + source .venv/bin/activate pre-commit run --all-files # python-skinny tests cover a subset of mlflow functionality @@ -105,19 +105,19 @@ jobs: - uses: ./.github/actions/cache-pip - name: Install dependencies run: | - python -m venv ~/.venv - . 
~/.venv/bin/activate + python -m venv .venv + source .venv/bin/activate source ./dev/install-common-deps.sh --ml # pyspark 3.5 is incompatible with delta 2.4 pip install 'pyspark<3.5' - uses: ./.github/actions/pipdeptree - name: Import check run: | - . ~/.venv/bin/activate + source .venv/bin/activate python tests/check_mlflow_lazily_imports_ml_packages.py - name: Run tests run: | - . ~/.venv/bin/activate + source .venv/bin/activate source dev/setup-ssh.sh pytest --splits=${{ matrix.splits }} --group=${{ matrix.group }} --quiet --requires-ssh \ --ignore-flavors --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate \ @@ -228,13 +228,13 @@ jobs: - uses: ./.github/actions/cache-pip - name: Install dependencies run: | - python -m venv ~/.venv - . ~/.venv/bin/activate + python -m venv .venv + source .venv/bin/activate source ./dev/install-common-deps.sh --ml - uses: ./.github/actions/pipdeptree - name: Run tests run: | - . ~/.venv/bin/activate + source .venv/bin/activate pytest \ tests/utils/test_model_utils.py \ tests/tracking/fluent/test_fluent_autolog.py \ @@ -362,8 +362,11 @@ jobs: - uses: ./.github/actions/setup-python - uses: ./.github/actions/setup-pyenv - uses: ./.github/actions/setup-java + - uses: ./.github/actions/cache-pip - name: Install python dependencies run: | + python -m venv .venv + source .venv/Scripts/activate pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e .[extras] @@ -388,6 +391,7 @@ jobs: # it's explicitly disposed. MLFLOW_SQLALCHEMYSTORE_POOLCLASS: "NullPool" run: | + source .venv/Scripts/activate # Set Hadoop environment variables required for testing Spark integrations on Windows export HADOOP_HOME=/tmp/winutils/hadoop-3.2.2 export PATH=$PATH:$HADOOP_HOME/bin diff --git a/mlflow/utils/git_utils.py b/mlflow/utils/git_utils.py index 07ec2509bd329..39efe97ccdb53 100644 --- a/mlflow/utils/git_utils.py +++ b/mlflow/utils/git_utils.py @@ -46,6 +46,8 @@ def get_git_commit(path: str) -> Optional[str]: if os.path.isfile(path): path = os.path.dirname(path) repo = Repo(path, search_parent_directories=True) + if path in repo.ignored(path): + return None return repo.head.commit.hexsha except Exception: return None diff --git a/tests/tracking/context/test_git_context.py b/tests/tracking/context/test_git_context.py index 6097dc8cd8d06..d93c30437ba11 100644 --- a/tests/tracking/context/test_git_context.py +++ b/tests/tracking/context/test_git_context.py @@ -25,6 +25,7 @@ def patch_script_name(): def patch_git_repo(): mock_repo = mock.Mock() mock_repo.head.commit.hexsha = MOCK_COMMIT_HASH + mock_repo.ignored.return_value = [] with mock.patch("git.Repo", return_value=mock_repo): yield mock_repo @@ -45,13 +46,9 @@ def test_git_run_context_tags(patch_script_name, patch_git_repo): def test_git_run_context_caching(patch_script_name): """Check that the git commit hash is only looked up once.""" - mock_repo = mock.Mock() - mock_hexsha = mock.PropertyMock(return_value=MOCK_COMMIT_HASH) - type(mock_repo.head.commit).hexsha = mock_hexsha - - with mock.patch("git.Repo", return_value=mock_repo): + with mock.patch("git.Repo") as mock_repo: context = GitRunContext() context.in_context() context.tags() - assert mock_hexsha.call_count == 1 + mock_repo.assert_called_once() From 4e947e943908217e292fb7cc686cf88fc1aa675f Mon Sep 17 00:00:00 2001 From: Jerry Liang <66143562+jerrylian-db@users.noreply.github.com> Date: Mon, 23 Oct 2023 10:58:49 -0700 Subject: [PATCH 057/101] Create copy model version API for 
local file store (#9946) Signed-off-by: Jerry Liang Signed-off-by: mlflow-automation Signed-off-by: Jerry Liang <66143562+jerrylian-db@users.noreply.github.com> Co-authored-by: mlflow-automation Co-authored-by: Harutaka Kawamura --- mlflow/store/model_registry/abstract_store.py | 32 ++++ mlflow/store/model_registry/file_store.py | 138 +++++++++++++----- mlflow/tracking/_model_registry/client.py | 13 ++ mlflow/tracking/client.py | 27 ++++ tests/store/model_registry/test_file_store.py | 66 +++++++++ 5 files changed, 237 insertions(+), 39 deletions(-) diff --git a/mlflow/store/model_registry/abstract_store.py b/mlflow/store/model_registry/abstract_store.py index 56870304ce3ca..764ceead20a1e 100644 --- a/mlflow/store/model_registry/abstract_store.py +++ b/mlflow/store/model_registry/abstract_store.py @@ -2,8 +2,10 @@ from abc import ABCMeta, abstractmethod from time import sleep, time +from mlflow.entities.model_registry import ModelVersionTag from mlflow.entities.model_registry.model_version_status import ModelVersionStatus from mlflow.exceptions import MlflowException +from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS from mlflow.utils.annotations import developer_stable _logger = logging.getLogger(__name__) @@ -321,6 +323,36 @@ def get_model_version_by_alias(self, name, alias): """ pass + @abstractmethod + def copy_model_version(self, src_mv, dst_name): + """ + Copy a model version from one registered model to another as a new model version. + + :param src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the source model version. + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. + :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the cloned model version. + """ + pass + + def _copy_model_version_impl(self, src_mv, dst_name): + try: + self.create_registered_model(dst_name) + except MlflowException as e: + if e.error_code != RESOURCE_ALREADY_EXISTS: + raise + + return self.create_model_version( + name=dst_name, + source=f"models:/{src_mv.name}/{src_mv.version}", + run_id=src_mv.run_id, + tags=[ModelVersionTag(k, v) for k, v in src_mv.tags.items()], + run_link=src_mv.run_link, + description=src_mv.description, + ) + def _await_model_version_creation(self, mv, await_creation_for): """ Await for model version to become ready after creation. diff --git a/mlflow/store/model_registry/file_store.py b/mlflow/store/model_registry/file_store.py index 0dad30cf3ebbc..fa2b2c6a4df73 100644 --- a/mlflow/store/model_registry/file_store.py +++ b/mlflow/store/model_registry/file_store.py @@ -3,7 +3,9 @@ import shutil import sys import time +import urllib from os.path import join +from typing import List from mlflow.entities.model_registry import ( ModelVersion, @@ -27,6 +29,7 @@ RESOURCE_ALREADY_EXISTS, RESOURCE_DOES_NOT_EXIST, ) +from mlflow.store.artifact.utils.models import _parse_model_uri from mlflow.store.entities.paged_list import PagedList from mlflow.store.model_registry import ( DEFAULT_LOCAL_FILE_AND_ARTIFACT_PATH, @@ -78,6 +81,32 @@ def _validate_model_name(name): ) +class FileModelVersion(ModelVersion): + def __init__(self, storage_location=None, **kwargs): + super().__init__(**kwargs) + self._storage_location = storage_location + + @property + def storage_location(self): + """String. 
The storage location of the model version.""" + return self._storage_location + + @storage_location.setter + def storage_location(self, location): + self._storage_location = location + + @classmethod + def _properties(cls): + # aggregate with parent class with subclass properties + return sorted(ModelVersion._properties() + cls._get_properties_helper()) + + def to_mlflow_entity(self): + meta = dict(self) + return ModelVersion.from_dictionary( + {**meta, "tags": [ModelVersionTag(k, v) for k, v in meta["tags"].items()]} + ) + + class FileStore(AbstractStore): MODELS_FOLDER_NAME = "models" META_DATA_FILE_NAME = "meta.yaml" @@ -232,7 +261,7 @@ def rename_registered_model(self, name, new_name): self._save_registered_model_as_meta_file( registered_model, meta_dir=new_meta_dir, overwrite=False ) - model_versions = self._list_model_versions_under_path(model_path) + model_versions = self._list_file_model_versions_under_path(model_path) for mv in model_versions: mv.name = new_name mv.last_updated_timestamp = updated_time @@ -347,7 +376,7 @@ def get_registered_model(self, name): ) return self._get_registered_model_from_path(model_path) - def get_latest_versions(self, name, stages=None): + def get_latest_versions(self, name, stages=None) -> List[ModelVersion]: """ Latest version models for each requested stage. If no ``stages`` argument is provided, returns the latest version for each stage. @@ -363,7 +392,7 @@ def get_latest_versions(self, name, stages=None): f"Registered Model with name={name} not found", RESOURCE_DOES_NOT_EXIST, ) - model_versions = self._list_model_versions_under_path(registered_model_path) + model_versions = self._list_file_model_versions_under_path(registered_model_path) if stages is None or len(stages) == 0: expected_stages = {get_canonical_stage(stage) for stage in ALL_STAGES} else: @@ -375,7 +404,7 @@ def get_latest_versions(self, name, stages=None): mv.current_stage not in latest_versions or latest_versions[mv.current_stage].version < mv.version ): - latest_versions[mv.current_stage] = mv + latest_versions[mv.current_stage] = mv.to_mlflow_entity() return [latest_versions[stage] for stage in expected_stages if stage in latest_versions] @@ -474,12 +503,12 @@ def delete_registered_model_tag(self, name, key): # CRUD API for ModelVersion objects - def _get_registered_model_version_tag_from_file(self, parent_path, tag_name): + def _get_registered_model_version_tag_from_file(self, parent_path, tag_name) -> ModelVersionTag: _validate_tag_name(tag_name) tag_data = read_file(parent_path, tag_name) return ModelVersionTag(tag_name, tag_data) - def _get_model_version_tags_from_dir(self, directory): + def _get_model_version_tags_from_dir(self, directory) -> List[ModelVersionTag]: parent_path, tag_files = self._get_resource_files(directory, FileStore.TAGS_FOLDER_NAME) tags = [] for tag_file in tag_files: @@ -500,13 +529,15 @@ def _get_model_version_aliases(self, directory): version = os.path.basename(directory).replace("version-", "") return [alias.alias for alias in aliases if alias.version == version] - def _get_model_version_from_dir(self, directory): + def _get_file_model_version_from_dir(self, directory) -> FileModelVersion: meta = FileStore._read_yaml(directory, FileStore.META_DATA_FILE_NAME) meta["tags"] = self._get_model_version_tags_from_dir(directory) meta["aliases"] = self._get_model_version_aliases(directory) - return ModelVersion.from_dictionary(meta) + return FileModelVersion.from_dictionary(meta) - def _save_model_version_as_meta_file(self, model_version, meta_dir=None, 
overwrite=True): + def _save_model_version_as_meta_file( + self, model_version: FileModelVersion, meta_dir=None, overwrite=True + ): model_version_dict = dict(model_version) del model_version_dict["tags"] meta_dir = meta_dir or self._get_model_version_dir( @@ -534,7 +565,7 @@ def create_model_version( run_link=None, description=None, local_model_path=None, - ): + ) -> ModelVersion: """ Create a new model version from given source and run ID. @@ -551,7 +582,7 @@ def create_model_version( def next_version(registered_model_name): path = self._get_registered_model_path(registered_model_name) - model_versions = self._list_model_versions_under_path(path) + model_versions = self._list_file_model_versions_under_path(path) if model_versions: return max(mv.version for mv in model_versions) + 1 else: @@ -560,6 +591,18 @@ def next_version(registered_model_name): _validate_model_name(name) for tag in tags or []: _validate_model_version_tag(tag.key, tag.value) + storage_location = source + if urllib.parse.urlparse(source).scheme == "models": + (src_model_name, src_model_version, _, _) = _parse_model_uri(source) + try: + storage_location = self.get_model_version_download_uri( + src_model_name, src_model_version + ) + except Exception as e: + raise MlflowException( + f"Unable to fetch model from model URI source artifact location '{source}'." + f"Error: {e}" + ) from e for attempt in range(self.CREATE_MODEL_VERSION_RETRIES): try: creation_time = get_current_time_millis() @@ -567,7 +610,7 @@ def next_version(registered_model_name): registered_model.last_updated_timestamp = creation_time self._save_registered_model_as_meta_file(registered_model) version = next_version(name) - model_version = ModelVersion( + model_version = FileModelVersion( name=name, version=version, creation_timestamp=creation_time, @@ -579,6 +622,7 @@ def next_version(registered_model_name): run_link=run_link, tags=tags, aliases=[], + storage_location=storage_location, ) model_version_dir = self._get_model_version_dir(name, version) mkdir(model_version_dir) @@ -589,7 +633,7 @@ def next_version(registered_model_name): if tags is not None: for tag in tags: self.set_model_version_tag(name, version, tag) - return model_version + return model_version.to_mlflow_entity() except Exception as e: more_retries = self.CREATE_MODEL_VERSION_RETRIES - attempt - 1 logging.warning( @@ -604,7 +648,7 @@ def next_version(registered_model_name): f"{self.CREATE_MODEL_VERSION_RETRIES} attempts." ) - def update_model_version(self, name, version, description): + def update_model_version(self, name, version, description) -> ModelVersion: """ Update metadata associated with a model version in backend. @@ -614,13 +658,15 @@ def update_model_version(self, name, version, description): :return: A single :py:class:`mlflow.entities.model_registry.ModelVersion` object. """ updated_time = get_current_time_millis() - model_version = self.get_model_version(name=name, version=version) + model_version = self._fetch_file_model_version_if_exists(name=name, version=version) model_version.description = description model_version.last_updated_timestamp = updated_time self._save_model_version_as_meta_file(model_version) - return model_version + return model_version.to_mlflow_entity() - def transition_model_version_stage(self, name, version, stage, archive_existing_versions): + def transition_model_version_stage( + self, name, version, stage, archive_existing_versions + ) -> ModelVersion: """ Update model version stage. 
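# --------------------------------------------------------------------------------
# Illustrative sketch, not part of this patch series: what the "models:/" handling
# added to FileStore.create_model_version above does. The helper name and its
# `store`/`src_mv` arguments are assumptions (a FileStore plus an existing model
# version created from a plain artifact path, as in the tests).
def demo_models_uri_source(store, src_mv):
    store.create_registered_model("copy-target")  # assumes this name is not taken yet
    copied = store.create_model_version(
        name="copy-target",
        source=f"models:/{src_mv.name}/{src_mv.version}",
        run_id=src_mv.run_id,
    )
    # The public `source` field keeps the registry URI...
    assert copied.source == f"models:/{src_mv.name}/{src_mv.version}"
    # ...while downloads resolve through the preserved storage_location to the
    # original artifacts of the source version.
    assert store.get_model_version_download_uri(copied.name, copied.version) == src_mv.source
    return copied
# --------------------------------------------------------------------------------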
@@ -645,19 +691,19 @@ def transition_model_version_stage(self, name, version, stage, archive_existing_ model_versions = [] if archive_existing_versions: registered_model_path = self._get_registered_model_path(name) - model_versions = self._list_model_versions_under_path(registered_model_path) + model_versions = self._list_file_model_versions_under_path(registered_model_path) for mv in model_versions: if mv.version != version and mv.current_stage == get_canonical_stage(stage): mv.current_stage = STAGE_ARCHIVED mv.last_updated_timestamp = last_updated_time self._save_model_version_as_meta_file(mv) - model_version = self.get_model_version(name, version) + model_version = self._fetch_file_model_version_if_exists(name, version) model_version.current_stage = get_canonical_stage(stage) model_version.last_updated_timestamp = last_updated_time self._save_model_version_as_meta_file(model_version) self._update_registered_model_last_updated_time(name, last_updated_time) - return model_version + return model_version.to_mlflow_entity() def delete_model_version(self, name, version): """ @@ -667,7 +713,7 @@ def delete_model_version(self, name, version): :param version: Registered model version. :return: None """ - model_version = self.get_model_version(name=name, version=version) + model_version = self._fetch_file_model_version_if_exists(name=name, version=version) model_version.current_stage = STAGE_DELETED_INTERNAL updated_time = get_current_time_millis() model_version.last_updated_timestamp = updated_time @@ -676,14 +722,16 @@ def delete_model_version(self, name, version): for alias in model_version.aliases: self.delete_registered_model_alias(name, alias) - def _fetch_model_version_if_exists(self, name, version): + def _fetch_file_model_version_if_exists(self, name, version) -> FileModelVersion: + _validate_model_name(name) + _validate_model_version(version) registered_model_version_dir = self._get_model_version_dir(name, version) if not exists(registered_model_version_dir): raise MlflowException( f"Model Version (name={name}, version={version}) not found", RESOURCE_DOES_NOT_EXIST, ) - model_version = self._get_model_version_from_dir(registered_model_version_dir) + model_version = self._get_file_model_version_from_dir(registered_model_version_dir) if model_version.current_stage == STAGE_DELETED_INTERNAL: raise MlflowException( f"Model Version (name={name}, version={version}) not found", @@ -691,7 +739,7 @@ def _fetch_model_version_if_exists(self, name, version): ) return model_version - def get_model_version(self, name, version): + def get_model_version(self, name, version) -> ModelVersion: """ Get the model version instance by name and version. @@ -699,11 +747,9 @@ def get_model_version(self, name, version): :param version: Registered model version. :return: A single :py:class:`mlflow.entities.model_registry.ModelVersion` object. """ - _validate_model_name(name) - _validate_model_version(version) - return self._fetch_model_version_if_exists(name, version) + return self._fetch_file_model_version_if_exists(name, version).to_mlflow_entity() - def get_model_version_download_uri(self, name, version): + def get_model_version_download_uri(self, name, version) -> str: """ Get the download location in Model Registry for this model version. NOTE: For first version of Model Registry, since the models are not copied over to another @@ -713,14 +759,14 @@ def get_model_version_download_uri(self, name, version): :param version: Registered model version. 
:return: A single URI location that allows reads for downloading. """ - model_version = self.get_model_version(name, version) - return model_version.source + model_version = self._fetch_file_model_version_if_exists(name, version) + return model_version.storage_location or model_version.source def _get_all_registered_model_paths(self): self._check_root_dir() return list_subdirs(join(self.root_directory, FileStore.MODELS_FOLDER_NAME), full_path=True) - def _list_model_versions_under_path(self, path): + def _list_file_model_versions_under_path(self, path) -> List[FileModelVersion]: model_versions = [] model_version_dirs = list_all( path, @@ -729,12 +775,12 @@ def _list_model_versions_under_path(self, path): full_path=True, ) for directory in model_version_dirs: - model_versions.append(self._get_model_version_from_dir(directory)) + model_versions.append(self._get_file_model_version_from_dir(directory)) return model_versions def search_model_versions( self, filter_string=None, max_results=None, order_by=None, page_token=None - ): + ) -> List[ModelVersion]: """ Search for model versions in backend that satisfy the filter criteria. @@ -767,7 +813,10 @@ def search_model_versions( registered_model_paths = self._get_all_registered_model_paths() model_versions = [] for path in registered_model_paths: - model_versions.extend(self._list_model_versions_under_path(path)) + model_versions.extend( + file_mv.to_mlflow_entity() + for file_mv in self._list_file_model_versions_under_path(path) + ) filtered_mvs = SearchModelVersionUtils.filter(model_versions, filter_string) sorted_mvs = SearchModelVersionUtils.sort( @@ -784,10 +833,8 @@ def search_model_versions( return PagedList(paginated_mvs, next_page_token) def _get_registered_model_version_tag_path(self, name, version, tag_name): - _validate_model_name(name) - _validate_model_version(version) _validate_tag_name(tag_name) - self._fetch_model_version_if_exists(name, version) + self._fetch_file_model_version_if_exists(name, version) registered_model_version_path = self._get_model_version_dir(name, version) return os.path.join(registered_model_version_path, FileStore.TAGS_FOLDER_NAME, tag_name) @@ -845,7 +892,7 @@ def set_registered_model_alias(self, name, alias, version): :return: None """ alias_path = self._get_registered_model_alias_path(name, alias) - self._fetch_model_version_if_exists(name, version) + self._fetch_file_model_version_if_exists(name, version) make_containing_dirs(alias_path) write_to(alias_path, self._writeable_value(version)) updated_time = get_current_time_millis() @@ -865,7 +912,7 @@ def delete_registered_model_alias(self, name, alias): updated_time = get_current_time_millis() self._update_registered_model_last_updated_time(name, updated_time) - def get_model_version_by_alias(self, name, alias): + def get_model_version_by_alias(self, name, alias) -> ModelVersion: """ Get the model version instance by name and alias. @@ -905,6 +952,19 @@ def _read_helper(root, file_name, attempts_remaining=2): return _read_helper(root, file_name, attempts_remaining=retries) + def copy_model_version(self, src_mv, dst_name) -> ModelVersion: + """ + Copy a model version from one registered model to another as a new model version. + + :param src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the source model version. + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. 
+ :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the cloned model version. + """ + return self._copy_model_version_impl(src_mv, dst_name) + def _await_model_version_creation(self, mv, await_creation_for): """ Does not wait for the model version to become READY as a successful creation will diff --git a/mlflow/tracking/_model_registry/client.py b/mlflow/tracking/_model_registry/client.py index 9167b3aaef22e..2db189dadc169 100644 --- a/mlflow/tracking/_model_registry/client.py +++ b/mlflow/tracking/_model_registry/client.py @@ -205,6 +205,19 @@ def create_model_version( self.store._await_model_version_creation(mv, await_creation_for) return mv + def copy_model_version(self, src_mv, dst_name): + """ + Copy a model version from one registered model to another as a new model version. + + :param src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the source model version. + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. + :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the cloned model version. + """ + return self.store.copy_model_version(src_mv=src_mv, dst_name=dst_name) + def update_model_version(self, name, version, description): """ Update metadata associated with a model version in backend. diff --git a/mlflow/tracking/client.py b/mlflow/tracking/client.py index f59ec61ff199f..8fb6d9df6e7f2 100644 --- a/mlflow/tracking/client.py +++ b/mlflow/tracking/client.py @@ -10,6 +10,7 @@ import posixpath import sys import tempfile +import urllib from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union import yaml @@ -20,6 +21,9 @@ from mlflow.entities.model_registry.model_version_stages import ALL_STAGES from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import FEATURE_DISABLED, RESOURCE_DOES_NOT_EXIST +from mlflow.store.artifact.utils.models import ( + get_model_name_and_version, +) from mlflow.store.entities.paged_list import PagedList from mlflow.store.model_registry import ( SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT, @@ -2667,6 +2671,29 @@ def create_model_version( await_creation_for=await_creation_for, ) + def copy_model_version(self, src_model_uri, dst_name) -> ModelVersion: + """ + Copy a model version from one registered model to another as a new model version. + + :param src_model_uri: the model URI of the model version to copy. This must be a model + registry URI with a `"models:/"` scheme (e.g., + `"models:/iris_model@champion"`). + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. + :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the copied model version. + """ + if urllib.parse.urlparse(src_model_uri).scheme != "models": + raise MlflowException( + f"Unsupported source model URI: '{src_model_uri}'. The `copy_model_version` API " + "only copies models stored in the 'models:/' scheme." 
+ ) + client = self._get_registry_client() + src_name, src_version = get_model_name_and_version(client, src_model_uri) + src_mv = client.get_model_version(src_name, src_version) + + return client.copy_model_version(src_mv=src_mv, dst_name=dst_name) + def update_model_version( self, name: str, version: str, description: Optional[str] = None ) -> ModelVersion: diff --git a/tests/store/model_registry/test_file_store.py b/tests/store/model_registry/test_file_store.py index 99a8a4b8149ef..d4c577fbfe5c5 100644 --- a/tests/store/model_registry/test_file_store.py +++ b/tests/store/model_registry/test_file_store.py @@ -1512,3 +1512,69 @@ def predict(self, context, model_input, params=None): mv2 = store.search_model_versions("name = 'model2'", max_results=10) assert len(mv2) == 1 assert mv2[0].name == "model2" + + +def test_copy_model_version(store): + name1 = "test_for_copy_MV1" + store.create_registered_model(name1) + src_tags = [ + ModelVersionTag("key", "value"), + ModelVersionTag("anotherKey", "some other value"), + ] + with mock.patch("time.time", return_value=456778): + src_mv = _create_model_version( + store, name1, tags=src_tags, run_link="dummylink", description="test description" + ) + + # Make some changes to the src MV that won't be copied over + store.transition_model_version_stage( + name1, src_mv.version, "Production", archive_existing_versions=False + ) + + name2 = "test_for_copy_MV2" + timestamp = time.time() + dst_mv = store.copy_model_version(src_mv, name2) + assert dst_mv.name == name2 + assert dst_mv.version == 1 + + copied_mv = store.get_model_version(dst_mv.name, dst_mv.version) + assert copied_mv.name == name2 + assert copied_mv.version == 1 + assert copied_mv.current_stage == "None" + assert copied_mv.creation_timestamp >= timestamp + assert copied_mv.last_updated_timestamp >= timestamp + assert copied_mv.description == "test description" + assert copied_mv.source == f"models:/{src_mv.name}/{src_mv.version}" + assert store.get_model_version_download_uri(dst_mv.name, dst_mv.version) == src_mv.source + assert copied_mv.run_link == "dummylink" + assert copied_mv.run_id == src_mv.run_id + assert copied_mv.status == "READY" + assert copied_mv.status_message is None + assert copied_mv.tags == {"key": "value", "anotherKey": "some other value"} + + # Copy a model version copy + double_copy_mv = store.copy_model_version(copied_mv, "test_for_copy_MV3") + assert double_copy_mv.source == f"models:/{copied_mv.name}/{copied_mv.version}" + assert store.get_model_version_download_uri(dst_mv.name, dst_mv.version) == src_mv.source + + +def test_writing_model_version_preserves_storage_location(store): + name = "test_storage_location_MV1" + source = "/special/source" + store.create_registered_model(name) + _create_model_version(store, name, source=source) + _create_model_version(store, name, source=source) + + # Run through all the operations that modify model versions and make sure that the + # `storage_location` property is not dropped. 
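# --------------------------------------------------------------------------------
# Illustrative sketch, not part of this patch series: calling the new
# MlflowClient.copy_model_version API shown above. The registered-model names are
# placeholders, "staging-model" version 1 is assumed to exist in the configured
# registry, and at this point in the series the backing store must implement
# copy_model_version (the file store changes above do).
from mlflow import MlflowClient

client = MlflowClient()

# The source must be addressed with the "models:/" scheme; the destination
# registered model is created automatically if it does not exist yet.
copied_mv = client.copy_model_version(
    src_model_uri="models:/staging-model/1",
    dst_name="prod-model",
)
print(copied_mv.name, copied_mv.version, copied_mv.source)
# --------------------------------------------------------------------------------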
+ store.transition_model_version_stage(name, 1, "Production", archive_existing_versions=False) + assert store._fetch_file_model_version_if_exists(name, 1).storage_location == source + store.update_model_version(name, 1, description="test description") + assert store._fetch_file_model_version_if_exists(name, 1).storage_location == source + store.transition_model_version_stage(name, 1, "Production", archive_existing_versions=True) + assert store._fetch_file_model_version_if_exists(name, 1).storage_location == source + store.rename_registered_model(name, "test_storage_location_new") + assert ( + store._fetch_file_model_version_if_exists("test_storage_location_new", 1).storage_location + == source + ) From b6e1f0dbfdff63148b0ed52ab083cf4d26da34de Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Mon, 23 Oct 2023 14:04:25 -0700 Subject: [PATCH 058/101] Use faithfulness instead of relevance in examples (#10077) Signed-off-by: Ann Zhang --- .../LLM Evaluation Examples -- RAG.ipynb | 174 ++++++++++-------- 1 file changed, 97 insertions(+), 77 deletions(-) diff --git a/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb b/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb index 1269c073d450e..9dfb44e79a8fe 100644 --- a/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb +++ b/examples/evaluation/LLM Evaluation Examples -- RAG.ipynb @@ -19,7 +19,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 2, "metadata": {}, "outputs": [], "source": [ @@ -43,7 +43,7 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 3, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -63,7 +63,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 4, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -102,7 +102,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -127,7 +127,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 9, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -191,7 +191,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 10, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -231,7 +231,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 16, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -250,7 +250,7 @@ " {\n", " \"questions\": [\n", " \"What is MLflow?\",\n", - " \"How to run Mlflow.evalaute()?\",\n", + " \"How to run Mlflow.evaluate()?\",\n", " \"How to log_table()?\",\n", " \"How to load_table()?\",\n", " ],\n", @@ -270,12 +270,12 @@ } }, "source": [ - "Create a relevance metric" + "Create a faithfulness metric" ] }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 12, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -290,14 +290,14 @@ }, "outputs": [], "source": [ - "from mlflow.metrics.genai.metric_definitions import relevance\n", + "from mlflow.metrics.genai.metric_definitions import faithfulness\n", "\n", - "relevance_metric = relevance(model=\"openai:/gpt-4\")" + "faithfulness_metric = faithfulness(model=\"openai:/gpt-4\")" ] }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 17, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -315,26 +315,48 @@ "name": "stderr", "output_type": "stream", "text": [ - "2023/10/20 13:50:31 INFO 
mlflow.models.evaluation.base: Evaluating the model with the default evaluator.\n", + "2023/10/23 13:13:16 INFO mlflow.models.evaluation.base: Evaluating the model with the default evaluator.\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", "Number of requested results 4 is greater than number of elements in index 3, updating n_results = 3\n", - "2023/10/20 13:50:38 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: token_count\n", - "2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Computing token count metric:\n", - "2023/10/20 13:50:38 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: toxicity\n", - "2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Loading toxicity metric:\n", - "2023/10/20 13:50:38 INFO mlflow.metrics.metric_definitions: Computing toxicity metric:\n", - "2023/10/20 13:50:39 INFO mlflow.models.evaluation.default_evaluator: Evaluating builtin metrics: perplexity\n", - "2023/10/20 13:50:39 INFO mlflow.metrics.metric_definitions: Loading perplexity metric:\n", - "2023/10/20 13:50:39 INFO mlflow.metrics.metric_definitions: Computing perplexity metric:\n", "Using pad_token, but it is not set yet.\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { - "model_id": "a85dd3a9e9b84b71a61b5699b06a2775", + "model_id": "23e9a5f58f1b4930ac47c88259156e1d", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + " 0%| | 0/1 [00:00perplexity/v1/score\n", " flesch_kincaid_grade_level/v1/score\n", " ari_grade_level/v1/score\n", - " relevance/v1/score\n", - " relevance/v1/justification\n", + " faithfulness/v1/score\n", + " faithfulness/v1/justification\n", " \n", " \n", " \n", @@ -452,44 +472,44 @@ " MLflow is an open source platform for managin...\n", " What is MLflow?\n", " [{'lc_attributes': {}, 'lc_namespace': ['langc...\n", - " 4.571291\n", + " 3.970739\n", " 176\n", " 0.000208\n", " 28.626591\n", " 15.4\n", " 18.9\n", " 5\n", - " The output provides a comprehensive answer to ...\n", + " The output provided by the model is a detailed...\n", " \n", " \n", " 1\n", - " How to run Mlflow.evalaute()?\n", - " You can use the Mlflow.evaluate() function to...\n", - " How to run Mlflow.evalaute()?\n", + " How to run Mlflow.evaluate()?\n", + " \\n\\nYou can run Mlflow.evaluate() by using the...\n", + " How to run Mlflow.evaluate()?\n", " [{'lc_attributes': {}, 'lc_namespace': ['langc...\n", - " 1.253230\n", - " 48\n", - " 0.000263\n", - " 21.149670\n", - " 7.0\n", - " 6.0\n", + " 1.083653\n", + " 39\n", + " 0.000179\n", + " 44.533493\n", + " 4.7\n", + " 4.5\n", " 5\n", - " The output provides a comprehensive answer to ...\n", + " The output states that \"You can run Mlflow.eva...\n", " \n", " \n", " 2\n", " How to log_table()?\n", - " log_table() is a function that is part of the...\n", + " \\n\\nYou can use the log_table() function in ML...\n", " How to log_table()?\n", " [{'lc_attributes': {}, 'lc_namespace': ['langc...\n", - " 1.217061\n", - " 47\n", - " 0.000145\n", - " 23.411400\n", - " 7.1\n", - " 7.7\n", - " 5\n", - " The output provides a comprehensive answer to ...\n", + " 2.833117\n", + " 114\n", + " 0.000564\n", + " 13.269521\n", + " 7.9\n", + " 
8.8\n", + " 1\n", + " The output provides a detailed explanation of ...\n", " \n", " \n", " 3\n", @@ -497,14 +517,14 @@ " load_table() is not a function in MLflow.\n", " How to load_table()?\n", " [{'lc_attributes': {}, 'lc_namespace': ['langc...\n", - " 0.680665\n", + " 3.736170\n", " 11\n", " 0.000144\n", " 193.916275\n", " 2.5\n", " 5.6\n", " 5\n", - " The output directly and accurately answers the...\n", + " The output states that \"load_table() is not a ...\n", " \n", " \n", "\n", @@ -513,48 +533,48 @@ "text/plain": [ " questions \\\n", "0 What is MLflow? \n", - "1 How to run Mlflow.evalaute()? \n", + "1 How to run Mlflow.evaluate()? \n", "2 How to log_table()? \n", "3 How to load_table()? \n", "\n", " outputs \\\n", "0 MLflow is an open source platform for managin... \n", - "1 You can use the Mlflow.evaluate() function to... \n", - "2 log_table() is a function that is part of the... \n", + "1 \\n\\nYou can run Mlflow.evaluate() by using the... \n", + "2 \\n\\nYou can use the log_table() function in ML... \n", "3 load_table() is not a function in MLflow. \n", "\n", " query \\\n", "0 What is MLflow? \n", - "1 How to run Mlflow.evalaute()? \n", + "1 How to run Mlflow.evaluate()? \n", "2 How to log_table()? \n", "3 How to load_table()? \n", "\n", " source_documents latency token_count \\\n", - "0 [{'lc_attributes': {}, 'lc_namespace': ['langc... 4.571291 176 \n", - "1 [{'lc_attributes': {}, 'lc_namespace': ['langc... 1.253230 48 \n", - "2 [{'lc_attributes': {}, 'lc_namespace': ['langc... 1.217061 47 \n", - "3 [{'lc_attributes': {}, 'lc_namespace': ['langc... 0.680665 11 \n", + "0 [{'lc_attributes': {}, 'lc_namespace': ['langc... 3.970739 176 \n", + "1 [{'lc_attributes': {}, 'lc_namespace': ['langc... 1.083653 39 \n", + "2 [{'lc_attributes': {}, 'lc_namespace': ['langc... 2.833117 114 \n", + "3 [{'lc_attributes': {}, 'lc_namespace': ['langc... 3.736170 11 \n", "\n", " toxicity/v1/score perplexity/v1/score \\\n", "0 0.000208 28.626591 \n", - "1 0.000263 21.149670 \n", - "2 0.000145 23.411400 \n", + "1 0.000179 44.533493 \n", + "2 0.000564 13.269521 \n", "3 0.000144 193.916275 \n", "\n", " flesch_kincaid_grade_level/v1/score ari_grade_level/v1/score \\\n", "0 15.4 18.9 \n", - "1 7.0 6.0 \n", - "2 7.1 7.7 \n", + "1 4.7 4.5 \n", + "2 7.9 8.8 \n", "3 2.5 5.6 \n", "\n", - " relevance/v1/score relevance/v1/justification \n", - "0 5 The output provides a comprehensive answer to ... \n", - "1 5 The output provides a comprehensive answer to ... \n", - "2 5 The output provides a comprehensive answer to ... \n", - "3 5 The output directly and accurately answers the... " + " faithfulness/v1/score faithfulness/v1/justification \n", + "0 5 The output provided by the model is a detailed... \n", + "1 5 The output states that \"You can run Mlflow.eva... \n", + "2 1 The output provides a detailed explanation of ... \n", + "3 5 The output states that \"load_table() is not a ... 
" ] }, - "execution_count": 13, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -596,7 +616,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.9.13" + "version": "3.8.17" } }, "nbformat": 4, From a7cda1c8af5b30329d79b3b470c52e6dab5ec3d6 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Mon, 23 Oct 2023 17:34:59 -0700 Subject: [PATCH 059/101] Add answer relevance as a builtin metric (#10071) Signed-off-by: Ann Zhang --- docs/source/python_api/mlflow.metrics.rst | 4 +- mlflow/metrics/__init__.py | 2 + mlflow/metrics/genai/metric_definitions.py | 61 +++++++++++++++++++++ mlflow/metrics/genai/prompts/v1.py | 63 ++++++++++++++++++++++ tests/metrics/genai/test_genai_metrics.py | 63 ++++++++++++++++++++++ 5 files changed, 192 insertions(+), 1 deletion(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 5930368faa7ab..3fa246f1bb4c5 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -106,6 +106,8 @@ We provide the following pre-canned "intelligent" :py:class:`EvaluationMetric ` using the :py:func:`make_genai_metric ` factory function. .. autofunction:: mlflow.metrics.make_genai_metric @@ -118,4 +120,4 @@ When using LLM based :py:class:`EvaluationMetric EvaluationMetric: "answer_similarity", "faithfulness", "answer_correctness", + "answer_relevance", "token_count", "latency", ] diff --git a/mlflow/metrics/genai/metric_definitions.py b/mlflow/metrics/genai/metric_definitions.py index 2977adbeaa12d..1994853942d0e 100644 --- a/mlflow/metrics/genai/metric_definitions.py +++ b/mlflow/metrics/genai/metric_definitions.py @@ -207,3 +207,64 @@ def faithfulness( greater_is_better=True, judge_request_timeout=judge_request_timeout, ) + + +@experimental +def answer_relevance( + model: Optional[str] = None, + metric_version: Optional[str] = _get_latest_metric_version(), + examples: Optional[List[EvaluationExample]] = None, + judge_request_timeout=60, +) -> EvaluationMetric: + """ + This function will create a genai metric used to evaluate the answer relevance of an LLM + using the model provided. Answer relevance will be assessed based on the appropriateness and + applicability of the output with respect to the input. + + An MlflowException will be raised if the specified version for this metric does not exist. + + :param model: (Optional) The model that will be used to evaluate this metric. Defaults to + gpt-4. Your use of a third party LLM service (e.g., OpenAI) for evaluation may + be subject to and governed by the LLM service's terms of use. + :param metric_version: (Optional) The version of the answer relevance metric to use. + Defaults to the latest version. + :param examples: (Optional) Provide a list of examples to help the judge model evaluate the + answer relevance. It is highly recommended to add examples to be used as a reference to + evaluate the new results. + :param judge_request_timeout: (Optional) The timeout in seconds for the judge API request. + Defaults to 60 seconds. + :return: A metric object + """ + class_name = f"mlflow.metrics.genai.prompts.{metric_version}.AnswerRelevanceMetric" + try: + answer_relevance_class_module = _get_class_from_string(class_name) + except ModuleNotFoundError: + raise MlflowException( + f"Failed to find answer relevance metric for version {metric_version}." 
+ f" Please check the version", + error_code=INVALID_PARAMETER_VALUE, + ) from None + except Exception as e: + raise MlflowException( + f"Failed to construct answer relevance metric {metric_version}. Error: {e!r}", + error_code=INTERNAL_ERROR, + ) from None + + if examples is None: + examples = answer_relevance_class_module.default_examples + if model is None: + model = answer_relevance_class_module.default_model + + return make_genai_metric( + name="answer_relevance", + definition=answer_relevance_class_module.definition, + grading_prompt=answer_relevance_class_module.grading_prompt, + examples=examples, + version=metric_version, + model=model, + grading_context_columns=answer_relevance_class_module.grading_context_columns, + parameters=answer_relevance_class_module.parameters, + aggregations=["mean", "variance", "p90"], + greater_is_better=True, + judge_request_timeout=judge_request_timeout, + ) diff --git a/mlflow/metrics/genai/prompts/v1.py b/mlflow/metrics/genai/prompts/v1.py index 3da6459812a35..dd28204ad0899 100644 --- a/mlflow/metrics/genai/prompts/v1.py +++ b/mlflow/metrics/genai/prompts/v1.py @@ -298,3 +298,66 @@ class AnswerCorrectnessMetric: ) default_examples = [example_score_2, example_score_4] + + +@dataclass +class AnswerRelevanceMetric: + definition = ( + "Answer relevance measures the appropriateness and applicability of the output with " + "respect to the input. Scores should reflect the extent to which the output directly " + "addresses the question provided in the input, and give lower scores for incomplete or " + "redundant output." + ) + + grading_prompt = ( + "Answer relevance: Please give a score from 1-5 based on the degree of relevance to the " + "input, where the lowest and highest scores are defined as follows:" + "- Score 1: the output doesn't mention anything about the question or is completely " + "irrelevant to the input.\n" + "- Score 5: the output addresses all aspects of the question and all parts of the output " + "are meaningful and relevant to the question." + ) + + grading_context_columns = ["context"] + parameters = default_parameters + default_model = default_model + + example_score_2 = EvaluationExample( + input="How is MLflow related to Databricks?", + output="Databricks is a company that specializes in big data and machine learning " + "solutions.", + score=2, + justification="The output provided by the model does give some information about " + "Databricks, which is part of the input question. However, it does not address the main " + "point of the question, which is the relationship between MLflow and Databricks. " + "Therefore, while the output is not completely irrelevant, it does not fully answer the " + "question, leading to a lower score.", + grading_context={ + "context": "MLflow is an open-source platform for managing the end-to-end machine " + "learning (ML) lifecycle. It was developed by Databricks, a company that specializes " + "in big data and machine learning solutions. MLflow is designed to address the " + "challenges that data scientists and machine learning engineers face when developing, " + "training, and deploying machine learning models." + }, + ) + + example_score_5 = EvaluationExample( + input="How is MLflow related to Databricks?", + output="MLflow is a product created by Databricks to enhance the efficiency of machine " + "learning processes.", + score=5, + justification="The output directly addresses the input question by explaining the " + "relationship between MLflow and Databricks. 
It provides a clear and concise answer that " + "MLflow is a product created by Databricks, and also adds relevant information about the " + "purpose of MLflow, which is to enhance the efficiency of machine learning processes. " + "Therefore, the output is highly relevant to the input and deserves a full score.", + grading_context={ + "context": "MLflow is an open-source platform for managing the end-to-end machine " + "learning (ML) lifecycle. It was developed by Databricks, a company that specializes " + "in big data and machine learning solutions. MLflow is designed to address the " + "challenges that data scientists and machine learning engineers face when developing, " + "training, and deploying machine learning models." + }, + ) + + default_examples = [example_score_2, example_score_5] diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 148b0817d4caa..b4800327cd59c 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -16,11 +16,13 @@ ) from mlflow.metrics.genai.metric_definitions import ( answer_correctness, + answer_relevance, answer_similarity, faithfulness, ) from mlflow.metrics.genai.prompts.v1 import ( AnswerCorrectnessMetric, + AnswerRelevanceMetric, AnswerSimilarityMetric, FaithfulnessMetric, ) @@ -707,6 +709,67 @@ def test_answer_correctness_metric(): answer_correctness(metric_version="non-existent-version") +def test_answer_relevance_metric(): + answer_relevance_metric = answer_relevance(model="gateway:/gpt-3.5-turbo", examples=[]) + input = "What is MLflow?" + + with mock.patch.object( + model_utils, + "score_model_on_payload", + return_value=properly_formatted_openai_response1, + ) as mock_predict_function: + metric_value = answer_relevance_metric.eval_fn( + pd.Series([mlflow_prediction]), + {}, + pd.Series([input]), + pd.Series([mlflow_ground_truth]), + ) + assert mock_predict_function.call_count == 1 + assert mock_predict_function.call_args[0][0] == "gateway:/gpt-3.5-turbo" + assert mock_predict_function.call_args[0][1] == { + "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "answer_relevance based on the input and output.\nA definition of " + "answer_relevance and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. 
Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" + f"\nInput:\n{input}\n" + f"\nOutput:\n{mlflow_prediction}\n" + "\nAdditional information used by the model:\nkey: context\nvalue:\n" + f"{mlflow_ground_truth}\n" + f"\nMetric definition:\n{AnswerRelevanceMetric.definition}\n" + f"\nGrading rubric:\n{AnswerRelevanceMetric.grading_prompt}\n" + "\n\n" + "\nYou must return the following fields in your response one below the other:\nscore: " + "Your numerical score for the model's answer_relevance based on the " + "rubric\njustification: Your step-by-step reasoning about the model's " + "answer_relevance score\n ", + **AnswerRelevanceMetric.parameters, + } + + assert metric_value.scores == [3] + assert metric_value.justifications == [openai_justification1] + + assert metric_value.aggregate_results == { + "mean": 3, + "variance": 0, + "p90": 3, + } + + with pytest.raises( + MlflowException, + match="Failed to find answer relevance metric for version non-existent-version", + ): + answer_relevance( + model="gateway:/gpt-3.5-turbo", + metric_version="non-existent-version", + examples=[mlflow_example], + ) + + def test_make_genai_metric_metric_details(): custom_metric = make_genai_metric( name="correctness", From 87c38437d7389418b83d91e7356edfaa79c87f40 Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Mon, 23 Oct 2023 20:41:35 -0700 Subject: [PATCH 060/101] Add guide for LLM eval (#10058) Signed-off-by: chenmoneygithub Signed-off-by: Chen Qian Co-authored-by: Ben Wilson <39283302+BenWilson2@users.noreply.github.com> --- .../images/llm_evaluate_experiment_view.png | Bin 0 -> 420723 bytes docs/source/index.rst | 1 + docs/source/llms/llm-evaluate/index.rst | 538 ++++++++++++++++++ 3 files changed, 539 insertions(+) create mode 100644 docs/source/_static/images/llm_evaluate_experiment_view.png create mode 100644 docs/source/llms/llm-evaluate/index.rst diff --git a/docs/source/_static/images/llm_evaluate_experiment_view.png b/docs/source/_static/images/llm_evaluate_experiment_view.png new file mode 100644 index 0000000000000000000000000000000000000000..17278ca6f13f0a1ea6a2292d8adb845511740c43 GIT binary patch literal 420723 zcmeFZcRXD0x<0IgD3M4JK|~7@y#&#T77-Ko@7BMs`4HL5XGtlr2GRgx2W)0xc?n=JA^(c{G} z_qZat_^$Nr;DUwVt&6SmL99chA-IQ0$^@s0ew>Q<$@=C+g#T;M3PjLf(zf;6vtSzl=1 z>hOG@N#VZ0rYp+!v0D5Ev8c9ThE$N+`-OM&Lvr~t`kxdRBhdlj8C?7&@ zRcRAK&V2d(bO62o@60OBOPe9LtO!|Lbp?sp;%TH``$k{>`CIe-yX;r4CfT_LWFMH% zyyEoC808CcVNV5FX{ACRGbx5h`SR;T-MHqjMik4!b`;@ruPpG6%e}Jg)8N5tl}I{K zD^|TI$8T&pmfzy}FncHPKCahWhcm^UyVh{fMktmfjaXc?T(W zI5BPgzat8Bs=hqd(fURGjNs?q$|ui&Mtt@MDY)#i3LU8PRjc@0X<7WraU7V?ku9jM zW7!h_Rd|vdeb@UEh~Rk{9jIfv@{7Aved9fDeRv4$v657Smdft6U}8=EtRA8jqZQ{B z{$H(6{#YGK(z|jzEqJPAI2HC>^q7?_)kF|!e`oaP)K40M!+oX0BmbMF2hPm?PRIuF zV1nO>86-0JK$3Ru$~QC3`jczm6-UF((HzomQwo8pHBWo~Y*P}pvOoH*AxyyWjcBs{ zu%6)BZ(<@j0kX@(%!GL%ZURS+^#l2@ZlsXfyw%paniP`wig%Jg^eyHU?c`uCL_;bxza%(s8mLMxr4-lCFrTZjmQp`hfgKq!`@$~ z6QW9e`cxhRV(KE(WX^GN@a%5W$ z^LVlvMGYv?=9CUQcyb!WX36nq!-q6>Nv0zRpHq$g=o3(h3UjNp?S|sY2+3~h?lXVs6QPrQQl%fy-a-X6zP$YM#ImhZDf*uJhk{xTm4rfZ zg=+1cxMiw-8Q$;-GN>Ii5E^DASuz3mZ6H;nqQA01Hy^$i{xv+|3Y;NJ z2u)WaJZAeK*lubFtl+|sqr9i=rmPb!%1}#(qf5UNL9Z4CzXN%+Avi2dF3=-rXyM+M zhxlxc<*t@xHvB zdYhZah{vB-oma(Bqq?lN*-7c^YeQ>eXqAK|Zf?3$V#+_u>n%>00nAXY{#6~?Z)%(d zZNn%c91x}6ijqmD<+Bn^YI{X3P2f6k{)y7%vF|_R7b^#HEbmyPzy-D{*Db{Ejx$ch zD8J0k zOv@WbOcYFP8$}yi-921yHbQ-x)&^XemShH7ex8&@6h@4F8Tf)29TA?b{w&vA=`T~pa$=R2|wguh=yMw(Cq!{dlwI93pcB`~5w@M+^TQh^6w|TZj z2W6hxoJyTJoJA1cCVUeDCDfeqM&I4j*z1*Bk+c8({oDRsmjTa#EoPnvkBWGTq+d!t 
z7W8%6JfYr+D~p?so737KD$eQpL1uB?LhXmMYji`PGZjp1MmW&l7xd0wF|cF$eZ;4| zPb1w0FII~-U-YWXMpu2fp(3LA*3Nl%do@5%RPd(d)vgkneTDZK^O9*Y}Y)_JAf4*=$?)x+1rfTXa)#mN`ptQ zSSfc#C8gdLUKv2A(MW*sj_c@|skY?D$voVClAL}Tqb3Ut)ypzR?7PX++Wy5gyPZ-H zKZfOn9i<%vZ9U)1>fgqx+33Iq4?m^D0DOzKUxv&0*ht6%NBS8I%}YoGE5?&7Dk5zoynV*zF|nsu~X*cOF4) zO8-o3L(O54FfZp=#GKKBp-%M=yRLbo8hw34i?Gv5zNvf@J?bt>6f=P^ZM}^Cgz;+? z17mUE>v*HR>ODAPrG%DOoLgnty<&(q5)n5$G?g+k>Gt4QOD#cj^!pEu2pFTFq_&^# z{_4Etn59Q7PK;J;pE3C^R{WFK{EOa$BuP#cNb@v3VmgRxer#%#rBE}=0%9+N3fSuw z$9hBzR1b)mY}RWbm*>1UefE*p>H=!u=DW?4zC9>yov!W}%j6S&Nc|1ZrCq&^aY}e@ zxtED%bI^|Rbo4;_oAg2FrCEQkrq#U_$?A<88%eXH%?57B)gT;`cQATmcU9_$c~u_q z;{bp!TTCs!CYuLp@EOh;P8v;h$b^)KpLd>Q9)4s;aInejsafF0!xJ6UU`cWv|FvV9 zO#@@>>Rh?Oh`~t(ZL6SfH3o`6B(})s)Dqw_iYJ?$#gSKOWzjh$G%)f+7i9*lbCE6+~ zZ4qQ&J#FjS`_`7uK!BbiiZ$0VXQK+aLr*|%M&RYeSVn9s^0PtP?EU85O2IvlauDf( zn3CV}G0O(TnAyOKSQd6hlz2PPyb~@L4x|)G)-P3U)YS>NfNe4Y62cn<#K0CI@Jm2Q zPeAh5HUYsiLWcj`)+T)L_hX0%2qNqWF8%!&BjEk~Cl2@nTL1l>I6j=p~FFjwXKbNp{apJSEa(Qjd=j-HpejWlTUkPB-$=cI`+1JU@*+artn)R$|t}lz$!z*%*-t1Ze=5(t*HF>=D>f_tahHB zt`hwGK0ZEtKEix1?za4b;^N}`0z&*kLcG8cydHkeo)*5m&K_)kJIQ~XN735D(%s(G z)856I`TV>VuU))6rCC|eJNnO`zg?%bul;}Zsb~+h z^Z@QjMp!`j(chc>_mBR^riTC3R8UM%^dFo4!$<$n^!z3yw5>f{9KFs5QP0`lQ$|RN z|9{&3-&?)>uVylWj~?+0{=L~h?*H#?4F3;p{&D|*Z=>mM4~(P5`Q^#{M<4&V@9)P; z@t+&OKN!T{tnIJ8fVs&~Nb&!t>B>-uv%>`m2;>M<73Fn(3DL8c18-TMc5N}S)C>hY z7y%L3vzmS&dr zIpHO0If4tku-FZzU!>h%-K7)A2QZL+ywGg}*G^rtB7K1Y`)iy-)I+jcsNO1F@U}0J zP4C>h(0~CQe&diEnhXEVdeKR{gz>v}5lcD0@mDvQ?OcAmx_*JF{%fe->@{D$n5A5z zt|X_ffmu|(|7!v+@FE{DM|@FRnFMAGA@EZ1g0>oxEseeCkX>>lx`c7M!^3((Ta_Ei z+P}bP{mr^W3E%9AUb%h!g0{+RHyCiiAtNCC-{pOw8UNqq{qH-s|6Se}@=pK%mv?8r zNmQHf3yuLKANH)~nKzRR_@QQ)CEJB8U)+fKSljgEU@hGW`AL|CvGw^^B6pQSBbMm? zlDD^Ar0)M{Gjz^6-6Q5)!r+aq$JPEcU&yhA^6()5%JZ==;U9M1QwHRC4rvkdX zq;~r=zHvNJBbwXDlrWZl@7@W@=x}rr(o$PwJTWQy%t7yg16DK{8x&CBI0Js(2rJ?d zt1aPTi&`*(5B>0!sj*cMXJX)FWXiOPVR8p4G!%R3A%oC}{S7Uhxf6?go@v)1SgTjd zQ%k1kJkR0dDx0qBe%P+@(xW3sTcRd6q({*%K& z+FrCCzAPsww&@xq=`u2%k@^8#o^paQE&>T*ljxj!q*&o(zDu`LuirfSreJ{c;*7k`zQ*3}j*pJmEjAye*oqBq7GSoj zc$Vn)@HPA-7;;`~UeOqB2?%89s8Y5MK}S?CQYUxhrn)c`IO zCi3(YWiHvj6qE^4Yx%`)vefn5kpKE&dp=U6Hs3H)$bFdX&zZIE`?JmD+U0}7PM*(S zW4%7FFMT+ulXY8emU4KvGnnyF1b+{0rkOU*$D6hY|BVmkfC|E?PO(ylzOIrFZ+wEo zptY4&I!}g9!jts!8r){w*;((i2HyjD#mCYw!wV9q%8T@j9z~T~>{%;+GvnR`O`0Lv zu0WEM!@!yEHhbF{w{c~>Hf=JZB6Zbiq)7A3$}!qL_|E!RUhZeEcyoOhSym=ZGu!YP z$muTsVRwCioN3oAv8U{WxIONO8}&JHK5V<^12n?=0N$$}3!z2F86)%)=$Ae|OMOkU zh_IDNkAOE_wL`i8wB8;Bq0)N89B=E~#t31Fvbi_Vx@!J(;~Q+-5$Rx{7nig_ldte* zVY#;@?ERQgl&n5UKxu9Z&JI@ycbnqBNbY4qAF6h*23d(pF;tS7TtZHiH!AA!Z7}41 z$B38irxJQlrNP>on?G|I$;}8}YAF{e9U(dZZVTOi3H@aitSvuZYmZHm}H&hhg z+Kr}ndbfc*u%!gm1|D{Cn3EjSZa8unh?V~~HYk1aLOA1eq@LZT78kh#!?&Nc$_Mjb z!AxoS*OaWIuO1${E@T*ue|*%H%x|bQ*I1Ena@J>sycELe;=(hVX0V6RRM;wAKv%6k{}tODTY@*Yt)znXD7O} zfd53AMdt^6?6CB9FnO`Pc=aaLD#*AN^PaBen()=K1WGj2HB^ca+&Cw}59Y<9ZZ9De zIW?t%yJ&KKgda08@Jn9HOxnFVjrtDNYN4ru-Gc@p$*V2|3JlGD$6v=$M)u0^C$;S0sc z_o8^2A7l_vqHn-xWqqCgi%-D11x4xGoH-BZ8^U&q%4Xfh*7pL&xQ_hMpGBd!UBb{% z@M3QQqUgT4a!q4XU1@Y&Q?;oXG>@*v00iwbuX**P_Qb36{^e3>cO7ET@czsGqScD= zMOjGQi-|il8?{8O;Y%3xinYe&M6~I$d-~B*Ah;>o0pH^^;TDtem{Ucg^J!%JA-HL1 zCm6S}+`7W^?_=Kc7WRJycKWdmyV9#zZ#Scc*(uhKDk(IRwI7LIV37BRc`uHM zR~t+3DMJKHPZ3=rMMmwoGp?CXRO2^*BlRy^3}VQi)zbp8k5YW}!O+ z@44<3&4nk4+JCMr27TqC_Z4mlVh<>pvfVBHwsirnq5Do~lqQd4d1>ACjG$ZtOBEM8 z&DsygWr+dSdnMpyQZH_rWW=3W(c!U?Z&=@KRrZYQhk>x^xp znCxocMGhDL74T0@s*xGH0zpR{!~9P4^;mCfc6(De@a>(RlRGv zvbsfECiyBzq)cBs9Es4TSk`RYVYPz1sCe8Z|L@ZR%sXUx=1;_l%iX4{W`!&d?;$@u zqCVYP^x8fLA7E!n#6IM+gfcrAR@Yzj+NIXvlTzPLt~!c?eXo!4l9J<9;Z{>BnOZ|N 
zHui>9YjKB<-Q`fveR%z206hJu?Oq%7E8PL)>+9G+v4GN5Q(}F`=p_CEr_YxgRq}fq zZdPg1>U)3q_aUQP0WDeYuI;MZ8vs#sZ&oCygPKg-QqPf8!!dvXsshz&y7b9sOJPh~ zCT$MJ?Gn&Avl6=}*8b3PGnlrUN4l3}jK6dGxc))8{mKhBeav7HC^CAd%{T<^hU2)D z`0G}D5PU3YOVfLsYBZ8)Z5XhN2rm2us48X^2dHJGydx>i;LoL!QwTOR-Eg% zWqj^n_giegU|Z0!hw-s&Wd?$UJi_J~hJ$N|$?KVUe6!>EUr4NUdQGzSV*+-*8PKI?>78?d>d$LwXJH->@ zg?_SH!EdKcW-4=X;6BM%l zezn@XBHs`rq@C;9t{7fZU$?&Wu{)`S(+Yo@AY`-dZ?D6z{fw6TRcn&9+O0Ut@ao3$ z>D#=t(eOfR-8{wHv2hArgJ}!<^rzG1q61>mY0@uU%r#;yziM8DsqQv8cN7&C>lTm` z8eJuYDN81dUCflnLCuvjn}&v&dgX^}(>#J>bDd-{=HuBJeP*?qH4Z0w$;k=a(INHK zxQ51pD7@{!0n96N$?n?7obx;tca?~XX4EJ^k#xi$y|<@7&BAmEi* zHtSuRGZx#g+qkX2;j8gW^EbjrCqWou^a;%y3%b0s!9tfY zBF`BedKr^^)3AYU?XXn+GM^0E& zbtQatRV(z%-#71TG1$L=m4Ssl4EJ$)*Oh~Z$<{z2vvckd>?Wl+J9ArJ1mAEyN2%DK z;pqYYU32amr~92|C=W#0kGqomDY7PW4Q+`tLscIdEOQ)Js0nR!Qi-(Hr|6YCdK-|t30f>CrG*lzN^E6N%xbZR`(lUq!@Hll z0DSlsZKAYwia*LwCY#N>w*%GIx=-K~1^WAP|)reJ)o z^s{yZ_a;k`;RQUJ9{$DMaNdOTUCrBUYN|OBb52?l(j(;3kuxG?+AHmUtoh*6MJ{2OxRu zp2|e8T*4%%camOY8sbbZg$F*?yj?DvV#*hR61bEGf5_&7L~`lfwkV!Xi@pGo1uTn) zD?1TQK4m*h%inE}0+m!aInc}F$5y}cUSz1PR71xRvE2Z`vF-6#$xFCB1lKoT_$>Nn z?_r{)Lgi(_b3 z{4Zq3gqPj`r4-kG{dji4(aQj;0)XM4H@Ofu#T+811zf+p-^<90jb7X+ppdWB7-XQ7 zE@0Zgl#_8$-}B_<_Gp$fG0AX5wwlH;$?}OfjK#4keOUCpNpJz&CdV8=3E0G3g-c1f z^Zv%H`~Iv?Z;4)MAl?r*Zjc{*cG|i^_tkZKQPmLOy1(U=7*z1FKl@C!T1ZUDA38bt z(y-1kwal!=DCVBDg9dX?0%xqEqTXfq6p== zANNNd|B;se*Xr;Wb`B1X*%U7QvInpkceO^0Y$4irC|e%jrCK%$v;3dYi#k3~PkYoO z5WY5A*h_p(irTe#1rltw(ROwk5+p%H5_Ezyn5wbY`AzYq(4fNX1|8j6BcFIC2wt9u z3_hFnN=V{0{*`E8MpvmJOF57wjeJKXN&Cl)Q$ho+eX zpWp~jdN7;QZ5xHp61m(LqQy9TlFJvmqUhKpK6OvE|M;=gir-Lk+mxK#Z`yg8BUNG2 zP_?@{%=S4aHK?!q6Pq=9@+;wrxZk$baHTXao-9zvx+PFT#5LaRS$Sn}vV^-PT;k!> zXQzM7b^w`6z+vpcRF(Bd>~z^TX^Q|Ra>^-1<{xkOwzjr%3D6&=I7_FHh+IFN5zkdzwRJQePkbx;vgoy;z~6w zyFcSmzJSx9INVEFt~Z|9 z^rz*k4$XniCP=){lP@S}`5dLyhreh%_d8lEl!jptu!>u}Mq?h}N}BJj$hS@_wHm~$ zQ|xwFujpM{+0Yt_snV#8j{x`yh;>!^m%?_O5FynhpnOc2ks*w4NZD|PUb!}H%PG}z z`=s(^YoOmZYkLyx_EPWVUE9Q~Eju9wlyw6jh9lvMZ-tN`F5=5qZ$Wc2N4$T3BOHta zKif~$M?j5XF>zociz+6aSMwc_BZZm}UHiZj9K3isEBZQ_-mC9#NUCfH@3sADa9@gd zo^8xud&kj6y!o%f7iI*7<6N^i!VHn$TuF4;zJzX=ysw#~r<+#;vRh1u&cLQ4%i|{N z5AMS(rv}=XVtI{ymxfTyq1DNxi`D&&IV5 zG(##X6Eu$l4=N2Yt-#uR*!mvZb|&)^QLrE0o5V}$|K6z9A$7b5SUZfKce$@uUh0ft zOfSi9nG>`+KG>Sr@Kt0sC7BD{QBMVy&^N(d4U+K^J<8R=7)B2y=2LF3gYatMEvGK! 
ze`#_;obx3^8j>^%akbPZksah$0i_Hp_VPL%-I z++kLAob;v6Xa>%QRMTBuUA=(&$g*eAFMIjN-`TXun+6<93z(gkbS9ZEd6`k|GyMuP zUK|R(Pk*B(czdzOeSNGr+w$LvCRWO!e5Q>lg*b#u+hL{|Zhc&-%}$kacjKkbx$YF@ z*kazBAo-jtiOVji)=5j_UZ6{z<5WPq%N0!_{qt37VPP=>b1@eWmN~fc0dRaD^{2h- zii(Qv|A-dPe^NTi0fiV8YbzC~r*ohtpu#?;b&iI^{6>2>Q-rKPppk9ntZZxvV7Hx> z!DL2B@B0BwQPZ!s=6{w)&p95tM`k6h(em7 zsm&+?nt1L&hWgjTKl5es$3ONmC4Fv}@?RFrTh%@r{}Q_Eko=hC50E@pjgyTsJ@%d` zb2hJj@`7B9Xde*lmPT_J^H$W_QuQ`;JOxCb)yjom#(2<;@&oqOe_!)>tq_tVXxVuw zs%a%FAkPOoM|k3y7b}+~<+lhpUQI}5HT*gIX1^G+`&%e+0$5|AD^_z)l&<0XbS;SZ zszHhFL;L9(dpTJ~vUgnBvXCtQ^@k5%R=z4qPgxLkn!Zm-%lCXj)L!J^YuCL5U`O8g z#^klX<7_MUP|>$R6l+&jS`ln{Zdd*}#6=U)mvNf7(5m_y67Rz5pvw%8_x7o_hbzc` zRze4INp^k(lbW%g6PyBU9$+7Ma zlaZE(@g(^zZl?g39slM~xLLFr*^sxZIw6I+U%m%~7@^l4??Zn30(PGtu)iS>lkr;r z+`KmpB1DKfj6L z>E1^M^9LVqMVwQ3IY3KRIgG0YjqkOa6qzvWch z`%+RSHiKEbz<}qJnKWo$Ws-4>8AbT4Hh=)%i?!;BXJ4+uF;Q{Ko*pb{=RLh%+KY1o zBC!zo$TL;cDGhM%hGXLnVH7k%75RV@&NT<%%~yTXDix|mP@+9_z11J*FjeQ2j?KUY za_g1c_gWwOWbEAVGmvVG|7DZwpEu3BBWk&^d-|pNylcp|wyFHC17Jm1|N8tes3M^i zo|~JyxL#V3geq?uX!UF4!y;gs%LAF*nCV)|!^f$DRuxDeb1+VC2=ubX?vvEvVnUGR zG|->T9Q~yUI4<}M4{@BTQWABZS(QxEA3aAZtEfOArl_490hJLMl)^TBap$ORiWiLE zO9Td-WY?Hc!u~bjZzptiUbOi@f=~U1n{?|%GKY3%JO)Xg*8zS$d^J+mn`3%(kg9HWY#~E5A}5A0KP1ERaz zj^O~?u>B2(Q{<$w`w4d1Z;@1+*pPDILj`YOZ?m^d8|)+6_OOqAwX=ZYgTb8a`p60M)spuZY=d)I>{i5Ak2te@+pml@ny!3* zdl}GrL$GYT$F^W|{-q-Kf4?BEl^Fg$l_mnRA;oyjRu9UcOq^t3 zbCO3vL%)kPjvp9S<)J2PHtvc!n{>+uWM*WCZuCv;)>14rXISp~YfFBWFV#HTZ#xq~ z!uNXEz1gS+){`|}14~1*Pi}ql&yW7!E#M43>~w7}&Ikyt(r@?tubNq`-jnu!-+FmA zt@R8Sn4yk*3kaZG zUyG|&8&v1Kv4$Ii8pG-1*X|_%BH;R>mpMRzX}^{R9)3_^hRS@&_>TWfdXV6;5%te9 z-_Q(p0k3u4YPm0FUI4oBnD|-v+kv)Rt#5UrRQm~6@Q z{H+aPC--5R#69$vnipaur4;s@@nf%6+D>u&=jjPC*F;ma&tCLh@eyGw?A_9vjAOc- zmR5?R3saoNmm~}H!}ilMs2!WojT;@2H*J_ru~u;z0<{{C8-U1U-Tsc8@R2|5Ral+x z)@zt^wPxu`0AnzaDr6xV!V0}y-8Wo+H|#hz&!Z~Zt{$$-oE`7Pb=gdW`m4~u3Vd9j zmSl>1bc$G2AjC_jf;Yck?m2awIEkVcdFI@_)p-TqH)dH-hncGGI==Fh%L*$M6LfkG zqLwpVGf*Sj4Xt%Y!qePVSZ|&asbS=c-D?l&&z+RqGBvXmMBME10MY-QrwT>0W;evK zD1^Vi%{1N@6ZDBj^=MogkUUg0vV<=?_=3@vjEm=0GJw3P>Vg(}qOhl;nVE2Q+^l@m zB5rC%4^!00QY{LBh)Kb&phm}BN+)ovU=}>3+t&6H6T?!@{z^9j360Z3NUx5S?>{p)OFzGd#BKJd$Bb-|&HI zQ8mu=8^F-ne()(JZ9K^k_sDzs_4UI!O?S~Txf*>YP5aRviX-~KgFg{0*e5^MjuJBp zx`vpZ#LAxfx{Ip0mysU8oDduUU9ic3pY=he>gAkBm}c4G0&GXW)tf=GJTx3=`Ah_` zzJF-+ul;-tq|?hC(UREaQ85$SgEhum8Cmox@JLnw@V9=QXm1arg#HZa0%+=@W`u4_;xfzMQ~{WJWDI9{Jnm6SM}!%Hqa! z26_cbQ<)yS%Kb{>WvCojACj!97isBz(~{ewVfM6t@L@}Dh<@D};BjZJCKn&Qe*Jne z$=I3v;I77is?71`5A9-Y_Q58-`?lr-U+Jr`0K%x2TwL*)^-2x|xTe&g_L%B>e}j+seLi$m1KRr9ZZ)6-T&RH2rs#Ze*-*NUr5W2gNP?G zUyasQY><(NSfxKvGs5${c$;3Mr)>fr63ZC-N2K^o&POI@-wM~H@6%5-ea1}LLa6+GTauRh)76%KB}#YMr%duTV;bbS^zE`` zi{o0dK(!9zZ|+$G?82Iv`b*7M9q+GK0U3GrO8iVB{fIZEmcy!n!p!$+0N*aAyR?$u z0Ww8oNNdzaceA!3K;+>6<{;;c=DKh# zIM8vcqKSMp08wf`gg{llK0Q7pOkX<$%uOU)w$R&d-0Os=S`5s~p_%1~bvN}c4MY~b zV6z{|i$optP1DiQJ(N)Gg03mXyutvh!PV5MB0l)|z>QASF$MLwR0o+%KLA&KV5W}! zJTKzc0ZLaJmi6CVy|1w64?Sv=E**`c^G z+zU>kRn#*DW??d(Ip6M#P^XhmDH$GBHilNZkd{0UmgNmw3by?*NV3~GXOnyhd? 
z5l#RDNJbKT>?i;Fj}6sQQ|(&setkIlBh4>PBPH|n)8I3pKDRYCLiQ<-KB7$D9$)gj z>A4dAR`VmH+M`?zm_Qn7Hh`}&AuY{^%jxHA4*tkvt>u=ZdMR^&-u`&VKB%I_CY%n6 zsd;ul$>02&{_dE=Ho)YxN*%3!sbjm}QpEDJ+_dQzje&^;=`WGh4Sg2LVrjt3sQ`4y zlEm}UO3*kM+UJA`061A8yCLS`9q&3#nL|a-c{%?&kCg!u=w#5@$^J)DVkq@W--PXqI7~1~c*fmee^w9OLyub4I3e@_N|cLJGwTuecTm2aEzTWu8AuSQ18O}= zWkq7ekb%`wKsXC(8lu>zZv+UjTJyG(9!*&i{VlzU((=Y%GU#|7kk=}MOY^XgV?e%R zsm`DmkS+n4#IS$xkyrSpVySH#Zo)W=4OcCPop=>yK5n4!)%hlkC+AU1&dtYV1qIGD zgE)^ORv&GLBa-hnt?8@m-|u)5o@!0%^QxnvckcqhbtjY zH9t;e(q+Jns7sGBe{TYKHyxrzzXvb4`G!M1?Y=_9Rj*m);DPlOkd5Vw-*HxY@Wc zP+HOIaO%jfQxN6HN1q3$>j4PKE04uaQVZHRTMp(w5~5bSn~4?*UE*IPOr&u7>VVG3 z4JMepaHXH);+3>z40=!2cwwLK2LNHeJB z3!UDqcrvT$-q~;1qIoK0;~6pXjIKhdZJ<_x&R@0blE*Pn9Xbqh)~d~``S9$ZtSyf- zz2iHri^P4lpqKVBjg2qDtXd%PlU`|14l9VwXEfPA^ZbrPfUByL-ZA&InLdVZ_uUw( zY|+o<2TEyX#v5X4N@(pD1-4Zuk%pGwv7wWC&hn&Fmx0fWmv6q=nb@0HfPiK`PBBW{ zW=rh7C0aTSS_*_d!8%%4G%UI=oPB|NJb3XKVyD%r@MF8x!pI2;wf_l*O%w#ppTW)V zv&QZ=moy>-%+32h{0!_(^$X1gQSW(VMp z*XbAMU@njh=>UBoSjXjhkXy4+(fWr?0jvctJzotBkb_d-FA6mn&nZiAVndmHhk5YH z_?6$JWxK0G6svo34gf?{Dz%2$4L<>x*(G}C86Z#Zn$w;$ZS=GsmMau~TtPVgNT2`#4SWJjcLU~lA2~TCPsBRw;S|J`Au`~O)9MXCQF5Dou7H*J)RU(Bnknz5 z_Kb`VkeBnYt0nDHnO43Eb(w0-hNvES49z{dQ=Z`9`6d!^2};EvIvHWxXiYVM8kDjf zl_WjJBhU8KHL53ojC1a|Z)iUa(Q)U5&O)81D!A299t@J5_!9Fq7AO8_q zgD}nQMfcCfWYg{t;)#|8r7ruIJ!bUy?iIZOcQCTjYc)u^Der+$8;t0lIBL% z9j2-f(#1!fs}qlWQCFsn7g$+Y`<_iYe>bKQaFF||o-R7!I;cXmS)kW#cFp~8DY@5U zP@2|N3VMtlulZ0O&N2Jsh+bywL-X1=klL%jO-p-w`PQ^C5f4{B_C1u-@k%` zkj47PwGn6x>d}FW@8&D`ueLDbL(l*CE&zZUtG%XPZJlZI`^xm$=8tR3;*-Rc6NDna zF}JsyF?EPC7V%|4QPBzEsFHGk8)Q_*?q%P6G-GB!#ba!t2d1JIaky{veQs`M<9$vj zHZM)Tbz(oIMZ6SmjLHZNES2SyohUa&012!D8sL{a^8ZrZj=V-<^z8>fsB6{yjrok{MK{%fK)~kjR8z+3 zs`u8tH6i9>+XKmKS@EV38mVB)Xb04JCgL&+*QU4wA3==92-pu@vZdYt8F)w-8Feg0I0@x%@M+g!)0Y-!t1zMf`{`p3e97RE6zn zBXO?zi^LDT5ohp$EA6zrDFzhF*OEsCzZSRt2DqVLAt=-#h7)*j@AU%TFLAaE!0oww zXy0%A;c8qizwbAf|DiSJqks^H?cmQ}J=GhbV8(=0qh*Z_D~{qvQnXNKG+5fYPj11vCcr+UWY zVcHsaBE5zeK>Aeu07Pvig>L43UgJDGLVbftyD@QI;1vJ?>MZnu1VGk%-j%#ru# zUOK?S7C>&cGTOc+q3V&8Z30v-?pT>4J2{bkK*IQ7+-*YKW7x1ZcpWx40`aOGV0GJTq%)^82QWmGxJiJ9N(PGj1{mYP4@t;ba0W zPeR;Mp$fJd2wP3tx*bYy8Q?)h`%4%P5{7n(OV2~K6!rkEE%eSmb4Nf@MhJB<|DsIg z5zsHX?qM`S#7?o2z^d#jGeA~n9CmfK&Zudph-b>PJK4jIj^^QL(uOxfT3IVlfxQ(VgepH|kDpMFgZA5Jbl@=3Nlp6P5RsSG2Gdi7>6`FG+} zEhriFfM>!P;HLS~+jL9=CPXKgJsS9Z)@AP7tuX{Yih_hxCWY zgTuk#)6ta7&v({G;4U-uZiFiJ0A)jTd>Mhk;39t3~t8R0Ga&Nn^ z%{v<3CIKb?gU08f0;KH#-5dJXF&{o`Q%<8^Qcb`>a1B~^85y&gl7*Y^Mf^<8z9d-J z(6up&lP@b<#_t8UJ~{4pR&Ns<%n)C|r`nrVBNUrzTD@VCPOE}74y}le z>-en}Gs=oVHGJJ#?F^7tuhpXNHX*y`uLGsSHts8%YgO=ilBqYi%kfvHQO_4mBQ(`i z7p`^2D{Wt%TzkI!gY)%bM=GUuvzE-TEDyh4!@ct5;yVMb?2FJcch1VM?GKdIGmZMW9XD*!Asl|UACp-mGPM?&D=97u!y%d z#y`V%fsewWxZo2z>yx@Mkc?dh%-P_pt~~i51}sLfg3EX`_ckI1B2q%Q_^HghFa$8s z0nooti5qu=u++Z=ml6+NtZ_Pja%-&QWnVKoxK&tZ0xPksnhp5>wX@{RY5jtigw;Z= z8!O+2tG9utB#FJJ8qKWa%!?R346r+4yXoV63PUowk2^^y0PRz@a&&dr)%k%s_xC0d zs&8QCrILa>ta5sh73R0QbG*V@?B%ZapUlp3AaUfSe zKq?73IqCo*_d*?jYLWQ}2spFWu|mrkm%sl-|DiW2M#QeT!g6OhGSN{)5#?y~vkj0! z1~%?)&7cArf=-4?j*NtlW-0-%|A|$pPY+Od+!$B^iG7CzV^DGRIH!;h3==pzKHLGz z|*7s??kC0 zpd_FYV93kFdu^=g!)oS@-IangKavUIdHgLKk(d9Y2DU5N1+bHIM6zCH}OFz~i8Zy!YN6W3T_{CQ^T;U6D#@JT>9kFbrV zkDac(nGgrX<8=L1CK3B+Kj{gH6E+=})QM&d!_wXzH-PdHI)c}3uh5?%amHi3-2V4! 
zxo+WRM5XmR=vZ(Sw$8URL(6USbr3QJ@Xx@bquUvMzj2Makvu&6o=|Sa)2vWu%;;%~ zk;-hcMqj1P0Ae2+JS6L>04~-oh-*Q>%CJB8&;B3w-YY7qZrv7D6akSWN)|*^R5C~o zr9=}+Bx4~NB3gOX#BQ;}0q%Gs4XF%h*vOXAG-Nv);<^Ij6{=kpRMsKyQ+FomKPVRW1><=_6a=9pt!QX0ool~UwB;U=}5b$6D?xOMVAzq>2^{Ejzb zD*5rEK=@o)fniUV9{0m=(Q*AUCk4|zLYdI+Wv)o>-yB)^Qg}~XXJUD+aqjJ> zgxH@dnknC0L+$I<{ezX9`CO&f_8!i{to(u<#xOz=5n(rkP|IPTZfJF zSo&{g_-C}+hGKU~v>aTS6Z?J5IunF1H1*u9n|5trEp-ohyDxQeh~|?x&(0|VM@kJn z6;goF38UXT9Bt2?li4hJe{d{4@m+)Svj`9SLraQsZ89W(aY-5Ia`qTNji#f(=qbx} zs!Cykur5Gf1h$IFH}}!)iF+i{tHb%)kHEn)Fi~zB3*-u4uAY$%7040YeVMFLuY-;I zR^kD&fjf5TWL{GT8FjlVo%1&Zehljm_w=^koeZ`B9&1Jk zuX@9d2_|?nQ?B1$7IAS;<<#erRDls+0o$w%F^8AX>h))aH&4IhUSDKJMfWH$Q~rsG zT{htA>D7(*NF>KKcfT!avqhG+wq3#pRQq%qd3K4>TMR;o(h0^zlNfiMnW|~8`iV+B z@OkriP4|?)nL4jqC9KzWW%CVaz6w?0%UKbhmyb!ROIF4G_SPjpC2wF2pmiv_e2QY@ zixZY4B0NFfJP}IA-GT2RUXH&`bEpwL!E3C4I~nkQ#9+?S%$R_5mH1#Zn1WsWx9$WGePY z=V{h|^zF_6!A#5Zsopuk8S>q;Fg*Y~VX$wiB7Nynr zijs;Bbq@djB1ht?I^RgsDS_$8<>qwcpJve!(O%!9Iv&&UfvFuZk8;}CAhxo{;FP2Aw3M)6XH6 z1yHgJlR5LkYcI)9eL>bwwVAio%lICL30V@(q$q%myV)9kJ@!m92zDot%Ikd7<)Nkh zFPzJ`Y$LA<{&MG;|L8r&t$&?l#w`6YJCz@nd@`y(n5*3VvF_lt@BXl$c^AXXTM8r_ z#Bb67;j;SD5K-1wX03fD+T4#*qCa0(63jHgDY^0sUVr>7&1dJSGmH;$D)^?e-uLAA zuG+<)UB)-G4IG2lCs;9k$~~L{U&*1^eZ|Y z!1TjUT5$JjFI?0-OXB2mPjBghUHT-3iJyW_&=$zL7PdZNc!`(_E57uIxs~5ZzPfeW zMXvKf8~38%%R>1e`32P@Y^KrLvW(#d`IxoqX?`SR{6_+h@g&{mx6>V@O8>iK8;*yN z`OtXawhnr5bR3#}X|v4g2c?vgl`Q=ymtwyRv_$EKuH8=onheUU3N980_# z+BqK?+SPyv!Pkv{5L#YTH<1mvUVJtt%9{7H>PIlS$~O2%Jiu1lZTm0(ZV#Mz8WT3_ zF1jP3;&kkP>YNNd>iU?y@D`<7bJA+P>kJvT?TAB{>m=W#^(FD*lE|=6O_Zn~+{3l$ zrC|ruutr7s!L#qTux9%Q96P|1nHfv4!H_mcc9uJ&sHEZEBh`9WxVWBvCHjC^Ub#)mWeO*J>_aBKa#Sc7aJqX}Yl+!i{HUjL;(fiJg!w!SS*^15 zi_(TgMTY;u+B!tua!`^)s;?jo%?A1DxQekPbd~T)Mw<~%lTYlZmfxoRb?6K``HNHu z&7ZeYK*N&&v}!@MwkoV*9UWAb9n${OlS{}c#hyhws*e;kQNIx~M}sD|J2k>w4%*A( zOCdaK3DAsIEhnw3aTy8rMEh~IWr#U1Q<|m$PE(20z}l zx$a!rg()_Z3=AXv&pPyYq`w1}Sp~@sG%Jpqs_6eE9+i`bfO-R&T8{jOtC{R`5q5wu za*9S0g*f7MX{u>%oREj zcrcEz?7vm=3`|0H3ykX9&*Z=fy4!RsNVp+5bW-)5< z1t=x?t8OhhO&R@uZg^5Abh51{SyUBh8kR&q&lh(R`5U=` z2-lw+4hyCcmPz`_mooRGAr&IP*2%%0>X1iua_nhM)gi^v@>AKum}-4$h+dIbTTphK zA%2Ao|E7@fXar?of3e7 z+R1K#J?#sZT>o8(4+J&-(}~6UBKY*wqz8l(Pnp{}{D(efIr1s{xF60s^rANoZ(9R< zhoxd~%Z`k&X(T#e^kiir|0`m<7|{TvPAzG%CB-j;He>xZg)cY^l4~sBLdO{h>2B(V z(5CuD;;eUbeB|y&c!b@F1doJ8S!HF5S-vV8YO1E2$h#jjbB5AuKn&ONQz@k=fG%^Q z@+3)WFzWhXiMLRBuEU85tE`o&JPjo5(oV^0C!!!UiFq=O)i#4zdY`N=8q>} z-{ab(!3=3$3o~Y~#stA!RIWS*vxy-8%7YX5d6scDAZq3GU=DQQOdW z*nYQH=H$g0_Bvj%(My%&6on&wy1xjs7g;`;&KC!9OK*pr3Rnx#N)_QpKtO%4sfQp* z-Ry-B;^=tvI)Gj?HNU0aw&9tC578MrbcIE%@6dqWWv98+YS4gF!|M7eQ16~Z_O7Yg z0k#grRYC`WzD2%y4`4siO8L~jEcXiz<=x4m-XR;zfKNP=&;U(WQml7VGtS=^j4Yov z+-6_K56*k);aM?_By1jO17{Q2scP5Qwc&h{$@7mnA>S3egv*B0zjBJjzlh^m$dg)2`yi zA3BfbO1F1s)XDVwzRl3(if5B{?Ws-uTn5)wB=*VIWw%+cLS|ipm#qe!_wJZxVlxXG zMT-y(nnXbMCdYr+l1)^EhwqV(>6^Hkm*WL~#E&Dl#NN7cu47`<8!htu2{!ci^`+(t zwcnZvr>@MVT>J{}1RhvXu8noE0y_BcR#}0Nd6WKTOfr)WtX?VA!y!){g9EX@O-xx$ zb$AS``Kv8tf}YaM8SaJ@JbWP-SYFcg*aNMSn@cjyBK+HeaGlx_-pGOi@0ZxGp855 z2k!Dn+L39%@>?E{Z-!@NmRJ1qjz*M)+29t+{L&-QFV!B}4dwrltFw1O%flt&)G>Y~ zkxpX>UU}rfVd3yfrPW}2^wzH13FojN+B5g}XHU)d6=*4)?O_>S;ZC#8yIN|cXr=s# zkb&910HI@@W38H`0Dx@uZI1B^B6 zm+i;_on`)U#5PqsY{u5cd~@(ooscCQU18K5LYz_zh1b3HYIw)e7c-twH^ImJC%3%k zrXw$hL2d<-5#m>yz5#SfW?(|-=X>13x-xQkn8^lOZ2G}|a$~IIwf%5bd!cUZn-~~(^^eWvF zL_?j#N^GWJ^#+~G%Y7_?hBknk2z~9>naS~ z$o02em7tJ=i^{G|75+4(Tn8CQfDYyKXVUBj;*O1p(~3J} z{u{4>>HZp5TrUz`<{%@|qkW#R^Y%l0S1L-_20b!lI$Un)9Iy2Wsk>~aY!PT(B8#;% zc{zCKJ~A~K?0dDDEZSM(?Ma|d7N))Uda2>bVfKZo{)?2b@a(J{ zl8}ft(adu{ppWX{(7Z*SJm*m-ou)16Bc&LXD#Mu<@$Qrn6}=a0K}%4J7RGy2jGIqn 
z2OXC`I~eg=$Fyl_{!~D=6Y#ZdKuW>o`d$`vJ~dHUe{DwYpd`{$i%oC?KYO9C_%;t% zd{mj1(n}zAn`u1`CI$%asdO$tJ?s$gU<-Yhl zUv29oX<}j`EqZ3_SeGa`^#+2Aezl+4lK%MG7r-UuT88v{O_Ha+T=+UbNZSQTIBJKy z57~%jm3{!DBTiS3Rm@wdVD`1QR9FCNSM2_J?01d4yNsn3<_6-TBMweE&&cNZmaBj6 z8E`5ZcyE>of{qz6hzC<{T-tgpkSz(dYm1bJj%_|-Iz1cY=?Y9wqYdzji9z8%d+-OF z6OuJu%i6D9H6Qi)%{2v|PiIV(aQ#&4xqG}R$Eh$Hc%b)tdUH2Q|L~~j4pttr{!qu> z9&mAGkbRyR4^(9R;znskBQ_E0^&9f3{cJ*CZqzry%$MtHOq{F9A!ntqoEs*t* z38ED_B~Bb;RqMAaaAiaK6Zv=9g$fm8<>(;BQq_X2t~81<=2kg-lf3`ws5@6=KU07Hv3JF8J(wX*Ry)5kEHZBG zda!Fp%b(_Zpu4-&nYzbcP8>tRjUK^p7VqTea@mUQV9D8h%RMrTEZc{XG5!ZLddm4s5_;& z_Xy%yc^M{{{DWp#fy}x((ig}-Mg|{xzd+A$ygV+^fQY($iqL<))vH3$yuLm+Qti5a z7u>|lI{Qbc=;yI_mE~s+p~{>xE4?o*m<3J4uH29q8cV&OR>8QZkt-J6<#jdCu6`0d zw8MP<(xgv3A4eyj;a=EsT=h6 z*@LzPk2Mbwa;0m&XKo48nA7t9uY7b5J}NndR;33z^A#0g>UrV2{pY<@s_Ww%*Trq= zBMDaVUiqH3`Lx6?1U&;OE` zT)ATMGNfco8Fd4m<81S-SLyWE%O8ncw<4|MF1ymU{q;$&`>f%?(pp!yCLH>D4;J#s z?P^EDV{ZzfYT~i|;K3g?z^3{Uv_L0yQD+B`6sCpN&-bfT|K$R^O(1~z6w2nO118%< zTFbv6VQLN@gtRvioK<#W(NUN}gcz5!Hk z&eAQFjwtyH6Fp;3f%Q?)=g5Ik+$nqAZQN76(<;|w@Z8ssj43@(AWcCN`Lf2%7BTF7 zupz){3H)@U@ENap&97s6>G7;(Z>zOd4y(25=n5*Kz}x3i!(CVQ0)P6ODic)FZs(on zUa3DXJxfLKuM|Bz=MQk~XCuw!!`w^ib$3r!EE4N4*(pQpfj2`cuzWQz zr$0B~MB;JY7716-G1*gITp&I5- z$<$-yWA3Rlt`Q7iCl=BtV<)2N77|DtHl^gy9$kinuIpLD=f-AS(cc+Xz;na9HojqA zk)zThXdTuQ&$ln>(Wj;{-a(0iSAqc$sn+4@p{%E zwyM`3z}FXNhgg+{B5zRl8-fWLv9J2r5m76p-|EzAgkNrHakp7Dj}kBF{*e0TBo>eW{>NV#axVTc_I~)&(I)QhnCa6DE+W)R zOR5hN?2=G7cVHPWvNLBy@NGCx;^L}GXJz0vW~L0!K|CPe%;(LpEhzcn^<0OpK&`L_ zUGJ?|yAM=fbl`SdICcvoU8dZTIqVP_;dYt`vNgKykG(+T_+!+enPTvL7NA*0i)=^i z7FK!D$o)b3*DmVw72tu%h4XONZZ|3T^b=ulG)gQnbmZ|U6Pl4fTyTS*#eW`HsrLc7 zHdN|h)TDK@Vg_meE^%>oRn(OfgSRZcF=ALOh6^V-{&ahOK!Wqv5RvnpuQ*(^Ol<8p zZBIodha0RTi7eZh=@Z;S%7!*O)4?#9oo9BXTZ0DH307C-u`PW5z>7AN_u_AO_o2lX zqhsTuif;9rNA1ntskj~Y-dAb!$2pe<6lJ#HWE!4xucVHmu*+{-@Y`$zE$$ecUCOsH zc0SV0-&FH^lPK@*JF5->+J*|gdW?wG@`h;@;i^-i}3e$sLScUvnlL5~p&+4$A0N6hTJ3{nj`^}b4HF4zTc zn^4L#lPOB|(lb$~j_3{}xqdX+{c61QTi<24N-SiyQg`Clh@Q2k*S94vx|oNw;~bU4 z5}XY}jmF{CPIgPdwYxQuJzUhmgM}hOblI;wA4gk%t111iX*ign`t#)KQ-c|_ZEBy) z*5JCBar+JTpJs~bdPsO1Zel6{!5wSZPV`mhwtNk%talSX**y8h1dc z_9&U-6}R^k)(kU(z1qDU1gyFD*b3D!Z}0z_`UVNo z&@te4DPl8#6Gra8;~-Jn@J<_ZheZA~m0{i~=-}}5V0AuJ6jwfKQ>H-j+HdCF%4uom zI=y+VbHjv8(;hS0H?z{bAs;{9iSX^)Cmlf?O-aILD8r^_M2yPkzol|gNX($w#PhTK z9aICfPjoF7v=-OhE*a->bi9dQ7GJuryW>v1HJ$yJMOXM9f%T}RF|oI2O}>?gNNNso z3QW2|2k`bw1hz>voWhM3Oi|x_SAb95VX8gPblvDA+)(P5`_#J*yz@-G^oCdCd5e~#C@4&Lw->BgKxi@=Auej|a$I7xK9=@-)Eg#E8 zJ#<@RWNnl(uCm*>z;^VgzaVt3a^_&der6xX5e2PCKbc$}peK^}Ktk2qEye79Ra?{u zv*r8ee%>`0uY}H%;im?qvX~y}E^G?cvP^lY-C7)5Fq{mb|gTqBj=FCWWE zTpGWTY@=+9d5C4r=l;SJueI;QOo34lOBTS z@%Z_9FR46^T0+sD~$GaS>-XuHVyHV6m zr^MnYFM!+o+}S22*Ie3be4!R0y1SCfY4$ak?wKbmuz}C7bn@2xY@h`zl6F|j>Dl6s zteGBO%`v%q3W}-z=MgRAu|KfffItsaJvi2@-;(S6bOucoeOSEy*w2!l@mELV8{!6_ zgvPdSS9_OU#UNiVV={HX=?nXD@NU^Gd!l;oLAq8Z#%+8YB)W+2b8?%$ZQpM48&X#) zqb<$n*KlTOY$B>ZE(YG`@^}%tn?8p%Gs><71>{ij7T=>aMNyHHjgmZXL7BB74_01q zNrK_NXPmkF7oA(AOJcq!6eMrq;}_oJA%nb?eRvAk9A(fv{8a4NsKo!gd(^{;s^C#`F(>*t$e2{n%C->Y_u zTxZlTE^7+~5pC1oU2&lNClt-Hd&81a*6QetwK8u^#59Y8D0Xl7;RTHk9 z(l+6BWrK{O(Yb*MHsw=MyniBHx&(&vX*Qz&54mC=`$P`?7d4imLGJ;_pku?DZTi;o z3uXvZ9O3y1OLINHn&GmS?PCQAwcVK)7q<2gwJ2{UAW66>LuX+5*AVu<_GE-KQI?h^ zq?5bk)j*2pl6IYfRvPyW`M40|4xc}^lX!khLhg#&?>mXtNZ5QF1e&i1y}FeDoGPHI z7r2?US)98Bl+hZDn1)91XGJCMU;@@m1*y$>LJzMaJwb!)oy_>zs;FU?!7}X)SETlh zbdxCYPwwt59}Wo{F-@(Oj5+T^Vl+e3tjq zJyv`>^`ZL*x~|Go;B2eML~1sMvg8}SD_UMKmXQx`Zd)H%!{`cv~G?bTG?aM1&|f(0OO(jixmCW zr>^a&@o3=1ESea#tJ@!m#QjKhn!fd-X4568x1wO+bEvHaV#>`L!1lt(C@*A>?A2Rm zP#MNmSed!nBw8Qdq&^Uk 
z5d?n$tw*!}B^$kWk3@hE3_nlheBojs7Z64SlomAE!C_*fGui5wE#0&v{INz_tro4DKq@kJzH008e}cW zFugrKKgXaX1stj0(Pvg1@+~DOqvQ||(MG}{Xj2oCuA+B)A?Yo<_Wmam6M9!3ODaOJ zEwrlH{@gD!q;&e8)QtSj{AxFdc16WDJm(|Sc&xz*_wX3FcDQJ_gvnJ$ZL-Wfw=ELZ zx_Q1{T#3g4nvc%8M?^#5kfr2GgrQ^&7i(X-&jH)&t(F!0FJ^5$w^p22KspAbb+8xQ z#fe=$x{_`QIf34k5|P)nA7}8biH7&nybjV^A%;FVNANKV73lcHTt8J*HYz@#6oN^l z4^K42x?`3CiZkp=1)F5>d*m0KzlmPkRrWBM@Qm~zNHnZ|J`WY5B*kL}*vM7qPaSM% zMyNuk_J);1Q6kfFe+N)De7nnUfVNZ;4zjxGI91P)ZXR?n_~-^d?0>Jt{B2|@Uf2t* ztJ+fhn$AVd4T)rK<8OX{_oCuR{43(&tZMVqk=`q1BhMM~W~#Y9-rILl5c&B3{DJ{~ zfPIFO*0bG?{99iid~hFgz^YfxWd5Ch%6}O;Y7i^2xQ>{s=9iy0{|QlYYG)xzGRQ3r>1_QAEe8@jG1wO*G|yeV}?suI#`3 zfeea7o!nAiM_uBBLord)Z^g*e%Htd>a{KD_ouHim@FxA8MUSu|I^VW z`u)<1XHWk}Cho6xG*$ha~mqzGy41{ujMet{G&4VH?IQK2)A=(Mh#%o(CT}K*dcL~7mr9TX(Z&xrPt{b$`pQGjP4IdhN$U%GV&IjHIk2E4bG|hBkG<^I^oC zcWU6imoI#`4+_t#_3D=6G?N)`Qt9+$vo$mF`b+E|ZrpOMGSR^4DP|c$s45Tyk;$?c zK#(B{ewheU-(pI7;}6VsFhDok|1>xIk(ZoUXp!#gcGUrKGKE%|&#kobE4$P``%uS(Y4he!=*#1cVaE@JLhvv;5^zBYUsG5$}Urd+>j@r2HSgc_30j!X&io06uhL827;PeK|bb} zoobMQMx`I_Iid=|?YTgXo0};mxyxRA5&`ve{XC#4yQ%KbpmnZ~FXJ?;_?Y8sNxoc&qQl^WvWKmLh-%W&i_1frZN^rd|ca_&|Xm264Ug&L|1wsIh#vFXE}_ zmD~g2E|bhFKm@h*{Z^@cp6$lnblf+SsYwu&-^=rjEg~z3-|7FJVVo$l(T?t02#8`iz%cci4pqDvR z_sM__exj2hO(wKTU$V_0N!&4Bdb{C*6{Uh+l7=9OzC$B1Oj%^?c6acP!Ve~OJ%Jzc zke=MOBF-2fce&S%3X2wJ@b}IDHtmUNy59wp#T`)+-r$`DbX^*ie&u84&pj>h5?^0v83evc z^ZCgIO-OxExT+NG{?%7^Z#?|B4kWtrdJOw~v038l@~n+n_I(S;iEi~J^w%miP6+bB zOQ@Is>;<+bMB7?MHPX>K=P>s?Dc0pJ{Xts8-9}e~=U>)?4r*V>Y?cmQmf~0n8N~v! z-1eA6bSrwh&L*2iKuNM~UsKk5UJP8zU?Ss=hmPr^$w_~pM0>_px4TS^Gx7_UtS z?3oIuFZ6fz^9^wiaB@#}&R)EH@cg`o8ik`aE7^M*JiF|e=W=T7>@5h}$r;yvm3G-R z#Chlro}FpjJlXawz49-yK%Zirn-!IiTW>G!*K8C&zp5h75Ol4Ifip^U;!{*D3-x}w z$NYz<*FG7Bsx5#TrMgiuVOL`el2m$BsmAl_&#F6H9G(1QP1c^8ZtKa9#H5=`pWV!B z$v3}W2n*zwYU3zkW_hFYy} zxY@F%=3vs?rFuX%9%`DSiKVJ)xS&iHvA5gQRKTA$7*h@fKU7XUyhIpK?8WWVm?-y% zdwOjy{hTq>baH|t-)IR@mc}O@yG5tmE`dg+5J-302bH#FO`9XhPF;&sVapdilR&F8 zv(Aua@ncG0pHAOK9s5(j-a)VbFL zS(269EY;Anj7yL9rAro~;w3~zpaiH){n`!c*I=%7J+LPv^B4y_^{O$e>X)SU-Qch@ zwfPh2>fIhm5?mceVZlT(sU-dQVb-ZmlFBhS(FWRVSDn{_4(fWU?)QS1mHcpoSEq_E zr)Yfjy}=Uw7Ye=r0r@vAK?jYb@Qwm0Z+2c*bjbEKRc%tpI`mQ$=OrK@ef@`$FH`dR zctz)V)vYSh*u)l6CTiX{wcCo7WFUxvv6=Y>>#!t-8rnlY-Dh93S-vy(XQnnqW-(6V z6LreDTAxs^=}wxiTRt}l9(gA{?)4fQz$hX&Pllo8+)koINaWM9a5yNv_%P>7#W(BG zXktV`bY%d$5BIMVJDLrQofV8uKjGjFbGg}Zn<_h)evWjqiuEftLa0Y)*0t1zH2A3; z!}QMfm+-z5a2BW#otuAb8D!B?j4@`l-(Y}|Y3W(z7=!t~s;BJD=)Ut*!JMfsFG1YC zcO0K8woBo_<2%%)Ux7S`ij;%8=;r)Un`pf{i%Z{QV%9Au6ZZ3q42oDs>BSG)j~R?f zS3l4;I*mnvhv@dovY1&|*m-pda*8JxdRd6pIHIMB#TGuweX8(p`b1Z|R3XTqL1~s% z500c6o0P1m?ZpcNl)D|0u*U~rMW6LcOrLn(otunuO-6=E8us-+K!@FKs|iLTJ2(+1 zcS@HTuVAjZW=*08p+Lk%xym?S#Z>_rs`({6qQk-SDSU>b>PA$(&Fx(z+*>1YoBx@( zHrZxgnSdIia->ygpqCtXTk$0NooBW!HzcoMye-Cj;x}%WnBh5hkvwPOfi!p16h9qA zP%DMbh8sdY6-Q+lGjC5(s-WqsD(ZN!`=#0PYee?86gI z&kwEh*}FiZr~>Q3{?V~J-JAvbbo;OkUA1r$pCz{J502NII~+vR_BD&3%L-l5S&=tw zyHH6a{Cp<7DiWiRr6iL9B>gTvRl&7j$-e8c3C-hr(p2lcFOij^c)(YC@lf%`u$4m)iE328zEX38k2#Mx0O#;YDJY`-xOi^t z1WcQsXFB)H^?&jb11>(NzCu6L*cEn>rk7V5MbdSmXEx>k!1jcC#5jl*93;-99l_^a zk@LY4@}ptJ)Pkr@$Q5;QzSgG~`2Ntm+7=F&l)c#~AT^8`sH8=;itrT(>_jBVv=ozC zV6&y5k-wQwD?s^p+@J2fRYegaJ--7`NWmGB)?f&_L`s6W+R^DDwW(eocI1pAKWF4a zY~pmc%@CLdU}w1EHFygUk{(R{mogIPJsMGBA(iJtu+N>FR6h&e`9L~*2#(5Zr^u5S zI4CH5es@EvI1SU#0N}CnG~A>nzf=wsxr!ccvTvGXe4aSlk&ZOyA4ZB!i^*H|%HCKL z4LJ&EJvS8kYuo9?F^%NSkSp-wu=_{IuI*>|96sOs#E;|XZjt>7aw%#Z0XTSXF<{SR z7Ev~$$Hl|+HA0|#(G78W=e)ef(6Ge%9XO4}kmsK>(|=U+;6g&NiNI4kpI~DE)a|80 zu~pxTW`0sGH5t+MS%N8=Nd`V)`3Zzifb}aM(u=e2r|pLH!5obJ*hRjagGb7Ajw*!I zEn9)mtW7jHoyssXN9QVW?zNMp(*VtO-b#=v&mS# 
zki)~`zBH-Xi|0@*&Xq|d&Jn@4FvFrP2V!JLaKbU4%AIvrXp5BC($~^{u#C$eiAS^3 zeOgl33G=tf6Z9AjxnWgp;ar8Iq|??|0f@N~nX_Yr!p_;S+XOmZsa{U&xu_=UY**O) z;e2_&B6ChFb~TjnmxG@OeWJn0iIlR>0#fR8!j$Ll^D#^aL$hz0&d)&bm}oaX>mXDs z3+s5Ly=WWGlik4GUcMp4Vxkb^bQO*?pl7WfuS;;Kby28%pHxGekUE*gW<+xnCC+G% zGO8`53V6h_?$$}ZKs}85z2%&KVj#!eXO}|yn?}~ehqa#j9$!0c8eXg(3H7G9I_4a) z4%z>%T9nLvu@{qU3tb}BkPw!+!`4O=N=<&y^d@E0_d5bx(kW+2rRm zy7fvQ)>aE`Lz*%TOnb@xn8J9$(?$(=O_IS0RAV(0f%Db#Q*zM#mVR5oysc}R%K(%kb1gTD;!r2}L>jj@_+7;{c6v5|I!1sK_I+oWy=Qsi)JGDO zdw;~56ss-Dn!V^FmnwiBf?ub1Qkpo>N}Y$R2kvlJ$+p=zZ_Zwh)jNXua>XLmHuU-`J6q~r=#WyH_S)@}RGt5P@O&WS&P!zsy4)>A3@1IIMDbE`nwNfKWx)~_qj`*Zsz%C>$6XSjiB zIwj?4JIg0=N-b9X!L+5_@o127Be&GZU2Spxg5x$C_%fb3w)z9i#6Y6R$ZuG1Jb(la zfYGFx-Lh(x2q*FwX@9ogb#sS$eo{g)Zfla{!scJBa>p0?a}{o^x%H&>(u^}&4Bu^| z{YQM}|7*x<64DUnU5bty0oPjmp1>@cbeI*pT^}8Qd22-LNlD?MZWK&awI( z&C&Rd-`b-n;!EqMj;Bh*6o0AZ=usSM?li1zT?`q$Ja-YQD@ij?eIUnntfmd!q2Zas zvY-?mx}LqK?{}=QDP)cI{oL7mXgWM8)zXHpukp^&yz7=eoQ(X%ZaKf`%jp?ON?mJ2 z%Dw%@P^9)44tyTJVK7q9Vb|$4cO4a8bZ)U#PYoIfkJ~Q)L~hFA1T zyzhQDE3vrhry14?8<(0@Jt8tPWPn{+ix9adMOr+1)@!g{i#z@WD2H4HiXt5|wRamz zX$N+@Os+B(pQenzjMptD=)d&Sjd;u!dj-AeXkMt$LF@y2nfOn;`{52I$R5#WhtBf4U5TSf&J%CJQ#dVK)09R%CbO+F@>Vt`Djz z5>=`YF|{K3XQW%_PIT6Uz&XbthbZ`x{6vrA_%=_)TcSac+$)#Y z_ry0#?b>~Jc$%cgYibFZxB#>G(RjCy_0$PI#4DwPDqK4Jf$Ee3=7n#WH8PNR6T2oc znZiit2omfUm7Y62!V`ir5_3VcFDD)ye->@^h^amE#aKD2K;f%Ox;M4=S)BONaMW$f ztP9SUN=ZEKeP1B>E8PSA^&AEoxq2;PN-1zKOT&%BvX=Pvc2jV>{>JO zG`U3#56oN2>ao2$M37ofL%_zZO4W9<5_Z==PiU3(x~^_5=yr5w>836~_x6J<=B(oA z+wTjtz2~lu^ovDR?bq}Spxd=`7-$F59qj2aX-?pJ=;wLbq}^~Y(8oq~ z%9vLaSahUTBXQKFgn4>|C5uVuu`;vj=a|B3FU`Xj)JmKvqo!p=X2!+F83sL`?KLDr z+UXlC{qT;9@PtL-jkeL8KP%|7Zo(ye8=J!ary$YOp^Kvil)!r_oN9-O;C)L!7V=C0 zATs>oKd`l+=qFn#?Kz9)?4MpT8nxioM=G|CCOb@Z!FJZyw^S#$jVRNjOT?%DWa2ms zs_M3su#IWrw+$~?n&Fb1mreSeyr-ntS^-{dIGcg6E5+jRN7hYj}jA+r9UjGgFp&#u$Kx*@?UhvZMCs4R)R z8b(lH-VAnhj~g1*j;>6j_aZgip`Q&q0>)PK5}A)!FHBcIb|( znWK?G=)25trLHj4?&@~*r{*hQzTT57W$c#qI){6%>miJiB7(oG=vSa*n~bk%@?4=Lkhpv>V$=UtK!D9%ZwLgMM=C> zV`dOg-`X?L#NBiaY8BrrYGmJHL?!~ct^wstxBZSzqWM5Oy@-m$YPRBxT75fcl~U#3 ziCZ^O^FeiuK9kRgAt_K!r!BMz&TmC7-8uzA+=PoGp2=*OK2`SYE9h*;6x+$b_3<0| zE&B$8-3&Ji34m!zVMtsj$8~(*OiNlb4Jw!zun@v`1h>*89P9C-I_!h)+jH;?t%J5& z(6fc-PF*8&05P?BSVx)%jRQx6 zIp&GZg0Np=g?PT*nwz+c)U`8171Kc5_7wxrDh1YFZ!g36oV~;BJ2#uBvMJlL3A5*i zZO0Uz{d7m4;WAa}{a5qvk0AbSJL{b%T^1f%mGne&;D9S(KQ@(;yOBYNdf2)z#U~ZI zQ1)jYCYRFLZW+VLaAreUt?EGKPUZBd!$DYr;DT-jUU25k*T?XLnF>vV)bgj;Oj@x7 zp7gBvy4SP`GFMC@u9>vDWJD757P(2}N@YzwV)-r=X*rEsMX_JRoRwiFAK6!FfB&1&5&8OM5KIMy{N!oA z;FA3w7lZ4)EA@?)V=O`2uDfpr=ut4crJ|PZrfW+dfk?Nc(qEU9tydHoY;tj@Wimk` zVYz~L3$*S@FXCI>sUTLf&-C=ueTPz}H+z3K8SZO?iL<-&Y{sgVk(d9?m9h^rD-Wp0t+$Ud-ZzrizYRM_P1*)iPI8>#hh{h_*X}K{K>27= zZ%I7*)$-igV|MHcUxgX$Ax4m`cb)W+M`IY$Ux$4ZrLj`TRhCzdidOe_vTx=!nH}z6zSZIXxg6C){OpxjPcuf ze2k$U>yo|qEa~1Msj6>N2M0`@x+U21J!*)2jkH&v*JIg$mEP)miGdDv?*$`gRg7yU zYg2q{@1!WyPRVDR^s6K?E^?3kboR62JB^;_;y$OjybDCd27L{k2j95D^uF==cxH0H z0)~R=mp|)|*IFOs|Ge5yIex3qNc~DLFDtpOPSCF)w@Xy@VJ32Uzl%M4Lo_> zuq)IgyhY5h^Zfr|?<=FC?E1Zx5R@7W2BbknC58c{9J-}MI!8cCNQoo!l4yXv0nJ#slOdFbEteu0emi#e-m- zr;IBsW3U=1@;`))nj+qpS%fzhECa=B&Om|?j=hOU$SILl#9 zm6cp7!;!KuDf6DZBSO$GCB!Bc-^Ovbzg6%V@CTwXdaS_sf}Vejzb3R~DNMZc9{DfY z7k$H~pw+jW&&@fR5NjQh6N{8L_frTF;(VcSRoTZ}!*{C~?e73F!>GggUPw(<+z}k3 zuRDF&c4aY&rg0uY`@AVp!KTcnSz%hyUz+4q7YHwQ<112hEbQqR$mydBudZx=%cvg* z6!(E=TO@(cTynDfxRaS&{ z-+a3hi1({&hRo+`q$Vt>=58R~Mk#$3Y*tFi;e)5nPth+xdoG?Z+vVfuw8$?kL$?$> zYxzG>!wV=anHuTSTpXgNFE)uLNXEmZxq9qG-0$~a9>3i~m>Bt!hJ4pJA=pz62uhDP z8H5+Rj?Vn_3fc;_lRUc}-W8}FhMKcHmO1J=NZU)mCVITBi^nUD(;;bo&bRH`5pzhc 
z@b+A>yOco{<*S7?c&*?Y@+=vy`R@D)Da)Z-2lTN{G%YGY!RdEeKmv|xH`(W9G3vY} zQ~PnW$-i&Bs)*q9HtZEy zaD#-DmvxvAZW|;k-|zqIzQrRy@pa{frOH;Y)<&CV=lMGnW0%_B`Xu@utb3*`R7IW! zRo%BTEbxuZ`OfP|zI%b@xR@>2<`#x&&=)`Q-ulBG1sQL)PW0jR{j5pz)K0->BKPKX zcCW%}=Sf0$NOaC7rWtX5{_t^XFKG8rrlFT-wNJla^0Rw+z(_h?LfSz;5ZBYX0b-s+ z*Gt*N9+$awcNQjpMx47Kdf~gCqA^|aRw&ja0UKV$;mAsnoKbs|a*gZ+9>|(&^loCc z28CmrKffZm{RE?fxRM$Ey=9vT8Gb_ow=c41%n+ze&JFto5;a|a71+9d<(hjpvzu~% z5B*ykSfNsx+&e*}t?j+x&#D=%e>!L`@bC7}bY<;4oU%@lTt zVf?9XR~=gUF7w)|aqf0L|O4 zZ%1%C={nG*Y*~-C^Dls;{G`yUDOCvdvZiVe&mV&NqIZB(qeH&E!Do5--=!&Rg zj=?!qA##a}6om!|#iOA+mSciZzUE~}E01OQw`UeSGOt0Knm1aVL)4MMU(_tzJB%Se z#8$Ymxni>TLCA&`7rY;xUaurv(Qe=S zRVZENOSmKboO{12^J8{me_!&oJ0f_`m1}FxaWe@O0wLBerp=QSS1`7H`lY>bKG!jB z9orhL(eMH*weg>crAgnJ_j@WA;F5M3F&yG8j~{|z3e)Z!;2U|3N1lDVg@pY47IGBs z;w*~0NkjP~kZ(QaS+P}=*Mv)Jmd6`xoET(Hg?>Jno_hdO^pTBnDr#Pt#$Upp>#&=5 z%xo;(ws33vk6@am!(~izIMA>thOZ2Qspj;z)SbRcerWSI=elQt%seMh%Rz&sP6I2^ zu^U@z45gxM2J-zDGU~_KnzGFrzOP~n=`(uy+@Gc(D7+`!-QSe;?e((uO0r|9GSqI* zCrZ5;HnE-V5s&RCW*0pElSz9dn+3~H(d1`` z!>t?|E9?~dwln|^FfFtFuyM!Itl8^D{b;X#G23lCTAA2*1+n%VL33D@=Z_z5qlw{2 zhxIh=83lNotgY{hV`J)*9#$S2TUT{!=NuX{QuG}#8by_ltEbKW{mh1yaLjE#ihw@s zqBK`Ny_^VrMVP5xe&ZsMu46WWQ5|FUoG4!Iu#&%oGh9EeBToWHLH7*-YPd==jb<>p zC}b**YQv3O0y4+(G+&^9GSc8Jw6y9Q%9o9AF4Q)&@UA0yj%;R-0l(Q!%f058iCGS) z=Z&Qdu#axB0U}+C{ zYBImxMO*JHY=(_cHxW+)o7H|us|#fxHS$RtmARY~>KuRni?+Ln`;XR9GMlesATRt8 z33HATl-MD6Z&Af8*FmC@Ew|=lYaGG zkFgP<`}|Hqc8gIT$I((*iJqh7+3t;SspHu(V-BFl=gJ9GKGL{mCmfd?N9vRL-lg~59qmW_V`ocETlP|0v1lO-F)gbxi4KD1 zSv$1zd50*vdcfan0Qoqo)Zmg@j0@0OZ_yu5qELkBqY=wV{k8IZ_qPjI>kV0MVLYoQk_6W?PZs9V*aJUBrA{VM~Cxi zP2BuvJo6fK4p;K*?Pl31xs!Q6j}!h}NZ-z@OsZdp>GE@P43{_&=63dtZ8m;iIbvGk zvFt12b8F(3v#*L`!VdY#=w}lNhtRN(rE$HIqV z(;sG}HqglkAlDm4XgbpJMTN^Nz`^2L$Zg->cpNJnS`qp^!MJ{f$1$K&koLGlxnLZ3 zZX4oHnk}mancDYHd z@dMZE@7ULQDSs%Y@n4dQuD>Urn5a}jr6umBEZSdJdM*`KvAKWAAP`_t)G3s=g%iK| zyob6-%uA+nj%C-Vi9r=7pO&+6G?sKh?GtK8mA;vEz~+{ps|NP3CEo}AVeIFP&U?A6 z?}h>7D{V|_%%?rFP~2l&(ftFg>2EL)=P#-;6BTmb$ksUKwN&_~wcvlhzUaZoPYTJ9 z2CrBpgC9~+2_`i&&emP>mue8d7wBP~kl(nHxP*8d5uMhf(e1|*+%=rT7i+y-6YAc(8eW$IrK7}AM;0^iA7aruVBgy7B>FOln^SbSC1C}?Nx0JIocr_ z1?f=mRTlqT4_*`VXD`oy1!OFpwcny(8Ux23%C~hdJ(4eC^0!JoE~D%vmsv<{v!-uw z1M2wAogGTj>4;^9ci_T_g3zUq&s0P^g^;T-wG$#T$F3m=PP_}5q0Ld`n6Kn z^##3|$TjPS6{r0mzBCGlx<~e)L&H96$sqT2IirF7YzS}j5_17FPrZ;?V;X?Fi z8`q3pe5)+zXgMh;KiyfXG_9U=-BDp`L;qrf3Yp8VSD!diQut%+zmF;gwv@XsFEFh! 
zIo?~7jhYl?`L$+{l?0c*ZDOu93?E@(8SNHcL7YX;g(Zt)BHz6IZb0<$^#c$6qZOn^dMKOuB(A5ejkt-FheDc|yv<8Xq z7;ES`pIc}RSA`v0FTjgc;z4I1KifN-$T?yH@& zow%-$<3s&7EBH(F@38a}_gVAx;Fo53&Fwa_E6TMKROED%Nb>pV-gBa|r6O=rJ!}*w zkonREc*tIvBYpBVjtjZ@@pvUl{0H>zmi?TQ zNYfv=SHLLSW&p3>ZG7lyniMZZ2KSsDTY5R#GcFbo{v)|;=tTWviVeryQ=1MV+o&Ta z0ODIS5xA{ZXnAnpdp;J}EYo2^i@${hMwLP$wYh9*EY;qlYc1-o564&HD(PZ+KtUZn z06KN) zUdoZssT#=i-K#x2I;Es(I zJuktV1)DW75VkWT5?R=sVrKM0?wP=~;Jiw}>t_Cjen3>K|{t1kEuh`bTNX&K{>eGmgRy)-UF=H83A7<%kBVsk4M%SV&e%6jCLMcFaH z+`7e^9_%FyVPaeS87e|s(Mh4%$6kWwcfeU+5|r0QEzUB}LR!PyQhnKdF4obKS+qO* zuNtd#5(bgCYX^H;;t5D(xhH5gV)O@EDHk>;8Tp{_fRIIiQ+xUqYti^t5m9Z~MRGYg zyaNnA6X+~M$U|3~+OC+rl-uldK6SURU5v9~jwRRQY7CXKxJVcOP}1AmJ!!q~IrE7$ zN=gOFVzC+}Ui$M^(qM$xwvQgPbx<+}+6y!V1ct^v*5meHdQVSxmvdeV6Kd7BXk>KI zHChU^KK^fsrNo~QLjpI`c=8_`{fP#&KR@v zFZj=GUFx@iRX6Hw#7u^q@vitQ1sONLz)Baf)_~?<4#t1!Rs< zMf4lfJad04ca18O;&cP3>2zg}G*->|X1YiNQe0mNvlrM456{@E__?4YS!Nf2j$P8J z3$G>hiaM%^Q?i2nYL3Pr@Ad9g)d0u3={p(6V*zXX{kQt4`-I^dp%>>V=y>`0Zkc?z-%TfC+S zdJG9Kv!)M&ImUvvN;TM4u5afa7|~L0ds`H4b1V|GmxOPC+BUXk8GA9ipk>ZY{x?BZ z>h75}WVo!_AZfGq9de>@(ot%0f>3Q21Len@mRGtI`aFjn?((k*M1U;)eUA<80J{!%)K%B~BNJg*IF4j2CcG3;7dwUx^)N6^ zg$T1}&-w|%a%g)D#uZPMMH-uGB&qbxHtCtC%5e|Ud`Y8(LUNNNC=AIO;I~ z^xh>857!L2U6ZSr8MgGoUUo_IJLD|fB_@=VJvjGDy*yqNN!O-i;$Vn!ZM?{JW0tHQ zcgctg!Xve<>&;0`Ss24!BvW)vZM$TYFtl?()~*bl(dNd+fOnLSS(=GK);HBAB$%G) zVhG+UBfz<8Azno&=G%Jgd7AyfSaB_Ci&K6Q(ytCce)Q*k0a;lRJgO--2X#*0z&vzN z*bFGN-1#6-s^@~m9>wa9;l+O-17Pjk?YCfsh)#pa9KYLVMVn@<_1nCT4-E(3rm-%c zM8fJs{oUC#$#y^3B-iC@{GppGsFCWNr*}T;{Up)a z&dH--K!|f`wHeaO8IcKVER|SuM-Y<-()JhIqWQbZ8eTxY_yIV;+{F2?!-iHN<@G|Hzg^No-TFoD@ zrbkF}SMl&evW)Y!i(CL3AT(?{x(<=Ui2>7iW|L zODE@NXy-N8RVzx(_mzU#*4<5sAAD3LBJd0p#2t1$1FZo(+1;Ty??fl;kCP9ri*|>A z3^ILSMCw)!_y*!q61GxEN`uPHmlTP}Ev7ysHu4%C4wIEjrDkj z>}~kIDAd?2YW`JxpjC%N!Xk^Vw+!pt1+&dz@8YhO8>p2=a&+P~SkA02ynA8X1fs+B zh7A!+Q8T<<=nzX5GFHXU4z1*L`YF-2=n}G-HA)c}!i2)hdZBD3uyL)ek`~vRIoQKw zrFFO(B1EfVLS@Vkhsi#8*T}V9-fth58Y@Rtw_mZC2wwvL(WE-%21x|1eH<0;5sHX> zI!Zr)_wZJRygSy?^XO5k>bRlFF(}xsfPJe)XLbWn&YiwkLRV5q}{cL z!m4hPQ(vmxOZgE=eT&%rFOm6v$=~IXO;RfqCxR@FcFBsc1fPWZnBBo`>zc znn)2f$StM&LSVdZkr6F?{RWZzA0Hr2`n%G>w;t(UZD_cF-4dVl;NT`&v`oIM<<@eu zJiHFDv$A0m5SoSUm#oJhM?i1%Zj<1YoWoD4Z-QZ2evKUmDV|gBo9E!!)?er?>-$Cm zb*xK%{-`%w!t zBR|fMjVxL|9YaIL1nDw*%=s==BTP&>wl8~kVf|brD<(T%9!a3HHZpmeb6I)b6trt+ zzQ;4pZCE2WUTr#Ti`{VN9gDD4-6}c^m}KI|(~hOxjrDJqIVN9&W8S*=n4a!OX^iDL zwZr;A;5Wir50@9>;etBB78{ykbPrs26IUrXW5!~|p13Ot1I3YUi3 z5$L_@Z*{&87(7m>H{m7&)T)5D)Hnel`S2RQ=o1rVojQ*%C)BfK7BOC8_b0q(NEehD zQbn35*z63z#9Zm_9f|4v>?EfL-m@(MDGAD{42|ZfWjC}pDh(|>)~LCG-+dHo`F)r^RLzOsvQCM4HF- zzu+MLA>$wmjLeERikQrfr)XVmz9;_zJ5!WwUW3A#WWpO4j6;Ojg?^&3!{Pd~$9{Fw zD+qHO2EA4ZHkWB$ZX(vDT=5{A41Z3rYGz@R^yCva=}zXJQkC zx3w|yIolB$OS&-y0IrU5f09x(s!zJL#ZHescP7iOkWc4Pn%nQJ#ws=Pg7+tS#(UC< ziD~4MQ_-`a<1p!OP>lCPifemIz+q!LLSt{9DZQ4yFs_Mmc*7v`eNO($l>6~^(T*ny zn$N?ER!qsgG~5kOh2rIFWCH>I&DrElg75s}E)AB72sM<^G4v zd(`?v^@z!o3f5>8na77L_7$#Kw_=w7|C}Qpzo4GJYL~a`{B(282U0N$vRk5<3b5%&cRS62P-24O4v_P*UYp3E&DFf` zeT4DP$Nc8e`z)?;S#eyj4-H2IzBO0#2Q9^W%v_4~t%5H`q<9uX_~*H~b>n+9IsHOb z-C5KST4)u^ob3^1GUG90%tQM5Rf)Zk4AAso{&E0AhUkcxEeK$qz$+|cpBYFlI^Juw zcTdGX98P;ypBUa+n_q$2%g~e`VfEM5HHZPYgO#}g@zm`V>u&XAAS0*ZehTQKibK5p z@WpGmp~hI2q+hrC{~ihIc!mBg==Gka5E>y0oUo^$jY$Lb+NUnGB?wi1J)jiuGNlYM z*SKV^QggMlcI3(}v%KxOq6dDBsy%=+TTI}HL=*<>=6jcAcu0|WW}X0erjwO846qsB zacx~o&E+Qc@aBg`xHBl|C*UrhywldOGpPnh6=)SbTw_y~QS+5MQ78FuhX(+L!6X65 z`rZi?WmDhNt_;UG~kNTBfE^4eSq1WLg|)C)vyqHX2ulnzR4GG@6V zgEC{*?RgFe)v{vxx$ARlzwoic$EQ5!K3`6mExuRhBQu$2oZROqskkM+Vu5mp7zvk( zY94HU8|CpEsyeo(*Q(5Gba&&~`cbZ%{v@?ZyiaFpJ?(kF3e~MxYI)@J@uJ9ppo+xB 
zpeM=bwP$-)eV%U4sg)&c5khGmV|KA+GO2dk9*alQX8~6A0o`KQ8=8wdXMNC}#>9~3 zgQzWYExGEjoQHVOuh+X~liCN#&?~oDo65gyB4T;2?lOd^@w*U#mC=%Rc8tw4rWV^< za}M-vgSUseUdrFfWx}=_%X<{JEdkOJUz&!+>XND@W6o13Qq~g`j#>_qL*uP7zSQr2 zblVGEd}I=9PGx=gQWp{XB`Wj^L(KPLJt5@X;@EX5GPt#gcyWqdqNb!ZGHDjaOrK#c6KE_?e9? zBjQ)#?GmZ>W6cB<)*iP@p&4%Ftds2J#U8&QgQP0mDS+CPLw({Hi=F;&EdbW-^u+nz z*7*8!*X*n)6JTDsjK0rTy6RXLpmq*6N&5(uBTDG_;|r;RW;nl~%M5Naa*N*7b}Xhgh=<2)qSYB8ekCjcjnae^ZpvPzY4wUxc#WYmc4@gbU!qmJz z`-Cgdjprdy*mV-TtEk3@^pO$^R4znwe;|rD#8>cr7duPBVNy9`B+8!MXnDhI?57~? zwHVZF8ghOCw)^S{a5!6*tO-$4j@;j(5+3=O=z?K*yD0%B`<=d{d@e1{ruY)ZB96CW zgyYo&Ef0a9+fs#19f$O}oRmYWW0E$fvF|BF9RzBPUX)ld7 z2on$1nnr@WLB{>|dJEm`hRD$?s^q+ITp*B;`i6xu3V?R>K@|BczppU$p?Dq#AZlXj=v} zrBX7QbpXpKCVG}`5o=row@7ntn4hiWVBcJ=iX-=H`-RpO+Z`4NB1}j zQ45=FKiz^7Ul-E*Ey=Oog+%wliVa?hU-^=o26|R+ruyiGp{4|Wwfft(LxarVhj&Bv z)nRv2IwhR@3)!US7&4v0ZdW~58JBX?{J1lZsE%TQOyGDHBDLFNIdnKQ%M5oy$#?Y( zt>Ol|U8CIkoY?Yyl4dp!71n*3K*kOH+-QL!5f=q$GucHmOfwy=`{+4DNV-v6V$e1E z9?>3idT@fS7N&(fD)Z*yF|z+!-d`$RuRI#kK!LOI6U&5RK!+u}&x2hy^Xr^J73-_# zi^dp_GAw;0i|syrV4vhXPOQ1B?CnCaF^gu-QEk+D18F5mcz+GSI-nv^{NlVgx>nb& zxq9saqm#TyO9=(n`M6W>ygiQ$QxA8!Euy|(v5=Y#K*YohUJ8#@ZS z@%cJ37O%%ms=V%Z>V7|3H`bOXKHF^!!M^uVziyzW8c0kr=7h0CtQ8G(27k>^U7P4r8LtUsPQvHjrBUQQK{Kcwb9$ z*y>B?L}9mAwl_-?eSNRZShzH!1zFA=f%g+Kr6XkOk{iU zDlHFveuq51?lb%car?(Yr&y1K^FpgG4EO~ zdN~rLZd2snPcWLd@7L|uZe=Zt^xR0bp}oEsMxPu?#q+&kWlXf2{hZ8!fpBvR>Q0dd z>E#fGL9{J14z{dRWMVaupuk*fB(?P&AiHu}n8}~sfzfg4Fi^$?+pO(uJz}syKA{ch3Syc! zRZx|Jn`whh5kNuZCXqzmI$yY;}zSH1>L=Ge3{Ije+#Z zm+d=eG(01kHQ04yrkcC$o=0 z^Bu26mmBCi!!78r|RSHvETi!#+B@$U6=Zi zMcm?W!al$@zR>JymAw!X>Via&&UL*>sIMx2aK)TcqPVLNJFx3Dw4h*PAU5q%Q`6O? zAkLu%RB#rI+Y_OKE3amyY+rWV$A2p@JcabU&T%zt zk$_5tn!KfZ6Yl=~tZ2&}V;R9F;>X?@2N?-~*|Knm( z^L)x;L7aXTdUI2TN!)wucV5R^PZbhNU5;66hkTBF3u%AJnHR)wDkt;ol|>wNOc`G$ z?hB?Js$CZ0P`u07s3XsScx#1z7jl!vPvP8;J62j6ob|K~THPq(cI+!o=*tT3*E8Z< zVVl6wwp~<{@u&ZIaSbA`e-vHQcm+q?<1-`yQDWQt+M#{_kUrYH(NfH=Beblm|3i3_ z;GKBEj{ViSPiFmlze1x0(0c-CuKHVz3)s*4JKPHuL3=DXsO*vW{Po6%G~k1Nn*8M0 zb(Bv&Tv*35XJ669(96PE^!0Mj<(RF;d8Yt%7Lj!S5ch?O`01T%X(xYd!pq>b{EAOh z$Du?f{cjR1Cj&Cy`^bIBA5$Pn8~i#U=xqW$?3qWq6-VRg#zm)A>0g}2gj&Uh0qC$% z)HT}j(72{%m&THFk)gI$8GmTM>zXF9=C|LcV9gysEwn49~oHn?3iV<7`24B73tWx(yM^?nvDx9$U!BiA8?GyqRi8cEl zRvhEEs*XM1nODVJz93vis%uHq-}o)U1~%6=Z&kk{%3+0pj1aV~2o4Z-tQwuQd8u+M zcd=d?O=E^H`XXckB&X57zn2TeHNPR$OrYlZ-L$v56ZYrvPUANU!gY7JCTwEQ_h79$ z(h5sjk6cGI0xEszw)}=$BKIA}X!;GPHLEe-B0Na)=f>4jx;EZ!xY49iEMoi8X062O zrhc1`k8;%&ecyCR6^<4A39Z`#{&Z<#a;{;)1}iMl&2({r)Il; z`#!y9GEL~IaG6u<3~9U-#s|{*>JNE#Z;M=9q~#I#EP%PkU+|Rd;`AyaQPd};^|DfR ze1fV;)3oW5Z^wypvW*{Rn>4?K7Fp;fjfdlnFb)eiq5X`FGz5R^Eh;Z37$XIx;Z86- z>}hL~e{&ytEArcjqQqxkYo8Q1H=4z5K;D#$J!*{I`eMml^zNjP#Iyobs%BULg3Yq(1ufWt<$j=a>rFZUpqyYh3>IxtAqm2Oe|F z!jwH%rmAuA-EQ(MGb29*6_{qJ=z=cwDCa1iekSALW1|8PxdC4VF9iFey-jQYiUBec zLVmY#*LPgiZ28h1?tV3YnGtc8vE2Z{btS(+n!3wG+Zw6M9vkZQF*zP%Um$`NzPv1@ zpOwmvP&@g}YVI!4I!HRZusaCvUviIi5`Kwl7&fec8cDUWK-@(?1nmfmGFSbuO!M9D ziZb_p&gnypb_TVAW*BN_Lz~cU!b>4BC$^~k(rf~-&yRQ?k4X&s+?zu`av((XQRECk_{&ys~Dw^6qrBN zZ!8`@-A{c-%+dwNALQZgBUMqbKTIyowEp>}YuPT}8mT;!1`GYledxxjXx7MfU*M&I zPjJAyd8Tf0<2ujEdOKoB~0E!J8oe)`+lhvh)1P*-$rs|`Wx7O z3Kf5fUV58&w(0OAK}G@;=q{tGK(V!C;g&jkI>tpAX(;8C{@o0th9uvHkb-H1`@1|N3YD7qk3dg69B75JPs0T9VEG8fAYJ zA@Z7_GPZbi(_(l{NaGx>0Lv%%YKxE=1IBRgu)m9-mp1{#=$2P`!kIk`wjS4lVab0f z-Qj?&Qyy;Ue#il#|4-s~3uCCjfB#?p;a`2(AfzDu=|o6(!uo$%Kb){{_R7)R<1hjw z8pfk_wQ&-I7ePhzzODDC;EqlF`|~jkw~`#*$nO#+4S86!B;FS9BbV9-XzHaSpe&EP zc;!}RljIdK=Ka4VGZ0|FQM^P0QN00mhl{FH*xRWhwae<3`R~-e{_6pw$Pfg!$NlI+ zY@%h11t6reSbY{0<5YP3eL35{vzm~r$36(#k899jmp0z-LkE;43fVn!A!z#%#5T4p 
zdZy{=OrHI)Aec+W|g!$zrK1Uk~uIhh3`u}Xwzu!Lp z*`$BF^Zv6*|8}Rb{U?(CZFu}oB>mf+CjIfhBk3*#__ag(2S01C7Uy{s&I53?D#4Vk zdb{Hv<^&Dn+-$w4YrBN-P`~{xBC~Ee(eCTVHu<9x}O$7v zgB~G42{OCQUj+U2x$Ev4KyoYt`B2p)=Q{5sZ(sYc<$;_WDALa*b|~@?RI^I4T)|tM zGp9h;Tn(@&2RIV!G4EfJ(eU{Z9H-L{xxW<2qA!d}ZMAVO1JcgjU)N~l{*?Ae@|K`k zrRtvIL9J_e&ne^|-rp2g@3Wvi+9!box#z$HGaoxSvT@Jno+!30Vro$~f`a=p0iAPQ z;EA$Qy3i`E}bsFk#6tEVa&3 zd3!61AiZG+bida7eC{Ow!`myM5YqYRPhHZ}6@#mTIGnxj@w=4*fFo6gEvvvNGyW8a z+vkAs#cfmYmM-Vz7vyzH0gh>ZncYA?P>FA97b3m`{FAL)PJ5Gh2nxeGL|eYvwLWg9 zPT%&cgx2DQ2V5=bR2kJw@*WMfrlrO5)tkG15Dn15Z>a0%s)_vTv;6%qqWK$fS%$cXuB)?0%n^WfJf@FY{`0L zu$GRwxRcnhyN^gZ-EG)j<2!qS{_-}Anx%LBI2OmHFY%hi=c&=y(-WL-(B5>-%--{j zdDu0puSGkkvh{XVk6e@Y|14!L)BpI1XbreDZ>P6zkr%*n>aIH=o#uhe$7_PVSq=dp zP`lO6-SUMX{^MAGvS+u{`)T2eZj;+$rwXt#SM`~D#TCJJ0&s$N6=17L4kaqrg=`<{ zPW@7x@|uqp0!{blKvguGE6uNj{1oX}J`^z=L_A(e(Ul121T=NWWda`tR)6ej6$ocS;Py-E(&>$a5P0A z2(AG}u0Jb@igPs6%;=jt6Ir%7TVTW!z{Fsg;6T~eU1SKvSO-RmEh7)h&(^`&BIG<( zvjeu8ja#1B_06AS#{NWIJM9OA;dK> zgP|Cj|D2QhfB^iC{KBairA+(#G!*8?ZW?g7;EkIDi|)xw)B$efPrDC+Ed3lHlXbRE zmNf$(kTE}h2(Ke({{a?New#1gPv~sDBijSw>FJZ4N-;c|8pe+0x?DW#B4I=~n!HlOiVipMK2@v*s%d@q5FVq}B<*9&2Kh>(5_$ihG4Tx zhi!#1{zl&a20#%J=Z+3YQHZZK56jMg*s-ShblgtCJxuoba@vi1q|^+9#Z!Om=?bq) z;I$dEr>71KjI64(?AESL`=B@kCQn9G6*Ldv$9-M;mu*=gm;e4C_@AdJokMz^uUFD& ztnd9*zfHmZXMsRKV5YNelRmPw5+oZ}+E}1{#=Hz1OMG!g zr=BaHbM#0@3%|`p*a7f>IsnS(5iA@A03s@d5EU(A$umJ%9*Q(u03ri3eqwEs!8Mk( zeH3^Cl$~G}F$ar(_4rKFZ|;g6XaW~ph1+x|r(;iCU;7VQf|FR2z+?hAri2LTeexaj z)z#|=uV;Nn{#md4rFW)SYkQR5Rvz}DL|oyFFurJn&{qpOQdd0ED85a`B0 z=Ed#0h?9v`72!pTnFxlaX7#Oms$gs+jgKQf0t;$Qt?X$lWic-V$LSl>9GSr zMHOI9O;~u%GAfVApY)tx?Q1&LQeuw7L@{W`6(TOXOMxkE`&)4dKSEkTZql<%aKm_n zU@HPgm5u`kMA9c9u*Y{w`$;4jHAgg)SjnRefLq~fDPdL@wC+?Uz#FWJ$wz<%%IW|} z>5Si300;1F06P~2TCf}WYLQ6VgI1|it#Wm4!81Wf1#QE0;nfk2n|Nf0oaVarV|ry!UZSWpY3ow~{mUoNp=LzV7b_Uf6=C zvOCmi1Hix;j1mb7-_Q3(ZD3blGfT88y>{QBOdDW5-3&(jg!B3C$AYm1mBt_I&&D|#H6D0+7 z@1N>x?ZmKJ_qhdX07W4~mOsCSmp)7S{O(zX)*mS7zV=ULkr-*+Yrhi^JwvdJVKH5D zf2%I6GL8RLnHT(rbD+q)DT0h)iwO4`)A?R-+t$F@7k6_Bd;{-I#+uj3DY6#rRrbCrmS)pj~++y!Wf7e3wFSssYQ z9d2WDRAIGc0?ujvdn!O_+Q6y?!SA!P^v>4?y{vD#s|TCuyMG#T@ygv%&T|MKpF;Hv z#pnmD_}=h-0uoYkGl)jVS_-%!5Hf-|+!YPcPvnma)oxSC!`8L1z<;{_Ad=<4Se0GcXj77W(_XLhusX)p7g6+@ zK)uRAeCxGG(8mVOe)L!*z}E6q2u0VBmu5`o`y)kh{6x~gpuQPQuXGKwq0F^$wEn!W zk5#QTz$c?@@sesLyYMa3ZJA$($4akQTdybaI*TsM7n_ldU&S5 zE_byR7IAf+hz7BV|q5+&b1sU+lp0 zdTaL8dE+{_s^^_K!bZKTr%9y=$fw0+qedN>5}H-Q5>jjB?E;QeQB0DzTaaQO118=y z4ZPtDuPDzw{AXL*=$h@4^pQ1X37XfCIvb5+I(JI2%(1=cKTCnkrL4RM2^#L>45&Yo zmkd1YKxzCit0NT%_&ZSga9s<8v-_l?S#b02pt8_yNm!+oIRu4b!H4Phu}g=N zEKEx=SA&+m?jEFeAl-*5eU#C5AS?YWiba_E`-6P_t^z?|FmCqOgN|pt zJ=&JK&0L`&u8DuMR_(hR= z!;4pe<<`Ieu)}(f-(5{;-}M6VD)N}NY}&Y=kP#yu^FL{Ot(ULE7=;Y*`ZyFSE^0aP z&E;BsuD0=V5E)DElUG)Zce*qwIM#0dvm2-4=&hGoG1Ms?efb9zh^f?w&4xVniXlB? 
zT@2{I;K~5)SPug-g7x-ip49Ta$AIqnfzaa6iHJ@eW%d12Y~cs+=ml!%pfQ%sd}}`X zDgf;q0K;zM<4K@%QC{l%gm8_~lPay8WR6^aEt*T6DfdNe)_U!$$c`$a7Q2!XoB+-H z!6CtUjk_tDzc=s)F+HQ2Un%rdDee8#(>R;%5BVZTdhVV7Q{DZ4v>tL3q~%D*2pk!T zDie^!+5s98($)u=1d;hei2m9;khB2;1e`E@M;O%r$EleAidpOufT|sb8w(<+GZ0d< z7Ua9Q0fL*@4yfcl9o)w?U(vP?`n@2HGw}HJ#ZZCPvNcAXps4TWvP_U-?~yw51CbN5 z1{h|h{UBM>J)Q*0f(xS(!c1$CWKg3AghKtU@4LWzY}k$Iv#)YFlf=p8>2uwrk-&0UqoaZzffT-^z#-pE_$Eyn23@H;XJad9438wR+Nh z&5c85qPOq=NyvK49#okg3jHv2bZhX!&~VIt-F45einQb`68VA4s>r)6w{DaDKla`; zsL6H<7nY)+AVm<6E~p>~k>0zA3WACP(t;q;yFjQRf`}jir78#r(o5*Qw+Kitp-JyO zl+fYadCz(GcF)=8JKLEv^ZnT~`N2RQ@?<@CS!>-_(N)pF0hH{BBJUos)O-FJ2XGwU z8Q`Gy;OR9sIyu^3#Cw4a+tnQRh+y^DcJnTPH@4buyWpMuWRC%bDMu{`k*=9aC$p_0!x_vVz!AYl%Y6ga zRRcRS2?ApLMbt*OpK%J|k7w9f>PnM7ib;$*j7>b5yj<1cdJW&qvcT_@w;b{G>*bsZ z{wi!UQ-lRJv;mLWRhO6zZ!Lu7zRWHf$oL@ETAMMg@+`x>2W1P*Wz)JFgt&t0xX)#1 zuea0ag1pGOH;}$k$CX)PAr%xF{3n6+kykb{GEGxB$5HAH(3|Rbz)SpM%L(vBbUOe| zIcU*Y4)Lm@LZH5%iudCIomRVVx2RW6icnsBKo!oRsgjg?{AJj~9*}DVViw-ZyxDD2 zI21rJC&&RU_ienXBIyBAkz?@WuO+0GL^c$^TWxRmYS2&53MOI>UNrbWTPnb#tnnak zR~q? zX6Vl1HJkA5-3$wXeEeeYFo<=tLE3#Ygmq)2clszn!M6ur7Zrj*qSYFhaFPaV(2*Bzw^7XQ?=V*5I#PRUpLCw^WxEDBqmv1a$450vbm5-mN6jtxB;&i) zI!w~bsmrHu$A7x_f@+AGt47=>{;HF_?W01BE>B9kMcXZb%1`r^R;&-U#-ii3Sj7aZ zOctrr;biFh*9-on^#C3xAE~f@f>+_yH>-!bx=)R~Y^{imfB+S>#kkk5jld7t`Jk~G zeEpnZ*v08nDHPM>B_ z_rj`7Dp1VlY=YVZz9VX_n!t3dcVJ=C>7!-V8Q(D0BzLl9%ZdpONnluWYAjAMj`ZuA-X5x;gcK~}?$Gj--7*f8F2)smj`GnV3 zqc?W8Y%QFM{;s5ys};!HzskMneApA0svnb5z3F~1-L63kpH&v#R63WU4ULakgBx2)&Y7L^M3zl4jig>zp|EgryYgpPjPSDuQ7YsZ4Wo(L z;&=xJQ1W(@c@cZ!O`;aTQp!u1uea1vt-low2W)0K%4>xK1@7BFSP>iDCI_Gc*%n0K zAaIIzvO+UEsp*22DBTw}g>mX!R)R@9^M>)ZQLUnzKlznU9iSZwx^KtaHbC{2ti99p z=m2VzTifO=W-)0_Spe*X;nM(W`jfsOF~A#@M6_kK!$8qY zKXRS9HNDcSb%&cWY6}fH3vz3bCL-F|MK|%-#AYOP3YLZbR5Cg@2?v=eICxTh+iGJ2mNI;EexB8IefBqUe4@D1HI=~ zxa2&up|H5(TYOK-x?HjU_17V72ZpQv!FKkKz0W-LFSwP|R_j0lVYku&IOT{*0Xw(< zMW#qCAcI0%H^Oosbf9m#7lp)a;>{Tfe+rV3JScfKhA$DoHxC1KL6NZc(Ow})YKx2R zm9*D@(bua$!tyk{1mAxI9kkRZIFPHw;`>PWYK)drtkIX`_|SFbO1S-J84ut-8!Xmy z3AnU%DG^0P6bVpu@D`-a1K>Qp3Gulp)(#%rA?sH?_xmVShHh zUu($=Tbj+@Qu&uA-G5T$cs&82v-r_0M&&=g^y_8NN_;NS_D78?JUmD~o|a*dOE38` zu_ZBHHdl7*b0@8)*r1W`m$E->7*MfqMB)ctk6x1hpM~tN8*w7rwp*LClKI0m8U!|t z4nU<=UwrtZ4H*CCFZv+hVe4nUX#8=&`vKl5Q|7~;$2*J+fc!RDS)<&$M&>$;yRa!Vs3= zZu?oA|9A)`w44P9zkP~?KWr(04|>E0?sm9=;g91ZTm}GsH^vEsM&VSty6LgJ?cFd2_uR6YbO;Lx7 zw8M6GggrV&g=T#YmA+=$ouB&Q8*@K)}aOt6Ca`~ zHy?ZfJ)$pi<6_0EXSKxbc=$nr1o4uWp%ulm?B7{>@+KEXh4z!T(J}G&N@O;T-Cwo-6t;`= zs0%K;-Y-c;&d>$n6ML0cIN6xogNw-u1%|CH(wVBLV>;pol{PA9%}xzfNVRl<>c!mK zzYl>g&y3O^2PMyMQ z+iml^z}}VUW0I*%M{}Oa+z1gTjS{&Xl<|Azme5HS>xgApED=**+gk0bouk`ZnBz_6 zJTW`w=d83r;KJg(1s+1Xg1o-xseZVZq(ZLI;7_jB9ros5iFagVjZQtNDp|3I9<%-1 zM}69XyU*7e5$#fl~Z*PKL{R05!;eNm%+o$>Iu{s|(5N__jBSPEQbW9~R9#XR?}Xi;s7)rW_v>osms(5!ghye&Y0_F@}J_|j`d%&b@SN;8d=fbiF~F3tHO z+l)Ble`zcvkPvUb94AT(T-hNsSRJZ{SLum64XkWWwFw3FU&u$5RR0bJ>(VBkLd@=| zU%pvw(&U<*lLAiAQ-EL$Z4+t!dPd6Z`E3dWOy>r2{+@#;+F`7JM!)-+LL1G(b8thQ z2Rv17Aav~&t3|Q$(8~+&w{)RJJ^a5|Iw8gX<=z>(&__ z3CDelC^~1BibhhIAy=2o=g8kfMn=prC|j+k+&W$q_)+~NM$*|bZw7SBcV3R)R|=wD z2qvVKr$IY^_l$K-jw>Ud56t1#jg;@aAim}?puO16J27ZEW`mj~KFE-FjmI2hgt`3Q z*-C(r9%1LW4itq75%Q_T<@u)=nJ(|Bl)oqQ76%rrY2y@7=WKWEfXL_WCm45H)0jMF1o zF6UzAf9=A`uhYT65;SI@P{i5gOK323y3A@WcPdA93=Z-YE-C-ZS|CBZ>6&?Dkja}J zCwJQZ0*q{xE^}5dMp3RnN+A@n0AI-zN9J zMExHr0{9_843L-CFX}0<{LA$Ek1oaFFBOSVRQ~fz|Nr__@|BAbk4SzW^xJd@6*ezn zJWe~~DOWRRc77|*Gt`J}g^j=(+y60a|BR3PI3WCv?-r-i{WkNu&Lc>3YJgLKo`E3A z=se~}MZ4(8HG8x?(e?s#X&IP^6vrG^|W5--I>o(@wGz20NX*WoQgg1({oPUe-2`5$j zeiGN=(U9B?wAtz2XnYM(pYiPCzDQ&Utpe_2Viuq$Wsw$lx&>Z2`7M_JTj!1a5w-a` 
[GIT binary patch payload (non-text base85 data) omitted]
z4nAHloYAVSz`MOp?Y6Zpm%po8G_6KJOD8%l@F=oRSQzx|zVS^^G(B&i1?7tlSrJVpsKjbNpM)y!SB_2;`golD#JM#dOv# zhQx>AE?Zw8G(P!iP-oZZ78u8FrwA1p{@?(G1xTJ|Wh}JeNISI)4bsdjEK>9#I{DKqgowp6*@XFI4FSPR*dheGUI|y#pOS4bhlRl_Y9coIVe<;MOvP3HLOBpp`(=`8T!!pUiowK9A2571!S2(2ZV>E=4Sm9)Ti(@|65z=z zV-G^Dds%RQHwyM6%)2Fmy^ViyPD{Xc>Or1n0Wq%1b7YI+bBW@QbA5}8cUxYO%LF9r zV3rLxNf(Wa@@v0)8)QFIJyedjbPwxYON~Hl?*>pFYZjgX#IinP_0Dmghvqs9noXY( zm(bXE2v!P;Hw~~mcF-eK{=M4slk@m<79f{BSulDp5;q9NQO)j)*eQaj3KjUL6 z@TBebUNLJdR;TKA=x;fG)gDf|29Mmrx@MB!bQDgse)>=>u0wJZ>aZx)yugHe>9^=J zcd**q@8bs;+7sWwbx;0q;oR3*glHTK%sBS0p-B(HTCq`o&`U=%t z@d4hM5A?=g*Y6w{JGerpV?1OpHVF~qt!IOUxL-{u@WfY|OIm#0(oz>sSfwfmu5QI}i|h&( zIgx7~VwZ1Dvl>fzCQx$*Jfs3NMD7$i2cFQ6tQGRf?hRE)h1UD*-I23Iv}U{b$MM2m z;d7p*>z;YaU~6PHU+kKqogUsTIul1!N}BiSS+`B(joH4f1D&;lduhupf{aRS%k=M; zha|a#Ue)L4j1t*Ao*PRQ=sA%!bd*v%gJ%^s*us18k6Flqn|m9=w@VN}MyQJ;Jw=da zlJ_ITwHC^6rlY{!=$YAEIVhG>)^s7F4HDRurx0*5v?{kXNABkxpy={-#VDh8o0vO! z7crh{N{+O^P%6I?hMH#OEUs&M5~hPgPdWv6>dDwmqe`}F>J3k?ode*(K#~!<@Z5Ay zHryDPRQb}8Wb{7B$M7$?L&=|wxEQ>u9w#=FctOBj{%PFImUuekaQHT-sdWAJ@e0czj_F6iZJ&%`LVt0fH@5RLpfKy7v6@rB$jorR>+*N|;BM33OpUu=xIS7;7{ z8&ef|XAf#Cs@;qo%pL`c$>&_*!y$Cvid1s;21s`K(I7yLFIeon>wZk~3T$C_bz{^( zQ_x$+E#vE4bRu_PhUI2e& z6zNo6ER8bO$}B-OU4wizdq!1EVX(CrJEj%kr$twq!f`Q_n*J1})YdvWW z31OwnW_7NG;D`LrEZSF=CVcF&2Xbg#zvm`wTfMu>L@L?@jcmG7vy&D>+% z_AZ#_e=f0%(R`S-t#46`)OO{g$BT3OZ9;d{GONgGrjHES#`n34AR%);Ul&`FAKs1m z`TD!=E|vS6F^_Kc^{#uwjn22teliTxO=3;>gS{1B=um4I75SqYyn+}^nQ3Vzm91 zQ#mbA;E7AlC2@YdWr1-4Kqd5U3O(mJf^jL9^opWb_ZM`iYOX!0i4|qZ03aF0HG|_l zoo4*LqPsK7G%1hNAWl@c`7g;&WY9{FE`zvaTEz#`YeAi z8^An(B>yWb_j?{7IT#i(o_P5J`sk2%P$=u~wSz05SL6MVP3rJT$`F zQvi+^Uu|^ssMVb;#KhG+#b$srk>YTyC!G_q#OgL^Hyy)?60<$tfwd!!*ihqr(+mKl zQlTFQjDl&&3f??b(-J{yXTuaCl)|A}#~lD$Pwkl>`rofrnl`UFxTr+NNx@J~lecxl z6~(Jf&-m;7K7!RoA2|+XDJ`+&F61c1vlJg3cAsYj@bZ;CO~WlYC>tf;~JLQW6u@H;mO zfS0hRwsv$9AREM6a-YVFCDZ8UGYA~-LJ;)r0WwqTu|lNkC=~?za+6kNa+d` zFtsz7i4kn{Q=0d3dsZEiVmh9BBQ}}H#6((iYK$*c4Abg?LQHQ_(W+@HdV~4F{T0Mb zl({vL{%p^>tRGXPQix{W7|4?G`yVOv;=P5eEX>JG#Q34RK}D2ZU&UFY!QUTLFVjI9 z-Mcs##Wx`U4k!76>nxG)jwhlbWX1f#FeApH?TT>JXa6wsU~P21|Fd+&d1)VoQC6_Y zW7i3>_>4+hT~sC%7n(iTvw$BonMV9BM_EQsac{!)enM&z?I%daMdR*VifCD%j@G%c zkT^^Kd1m=iZ=?uU0EDPYkDSQy=yOG#A_dmBeUbNVmD80nGww~U>vNnD%x{iw52HKh zSPr9gi)`pOI@mo(Yywj3$rIGYuW#x=rIDmIfg0+@j?pC20$mjFT(?DrMsJ#DL=@5u zyHqXSmAd2khu?k)H0|dLVBPj%uz)HREIH9tYWdag>b4)&8caSL8NcnQYPMtSwSrcA z7uskl1X5pDIhW|^?V)F(W8nJ^ZdXg_qItV9<$-y>B{=Ku+}3mIz2(Z|E!{;@Qe#vP zcTei4A%!q~yXkg$H-VOY3o+e?taoFMfE_U35TPA>Z&D?gdj2`(%Bgs)@Aw6G((Vpe z$&hjW`n;<9v)S(Omy*k^E<55vE$@B{PTRe*FS?FQl817W2YsQ)PVl@kv$Z7`Mx5kw zXJ~nhU`Z02{#gvsRNa{+QEfT=<5o|5ItD7vE>Pg|c>UT+?we1={dk|N4%8@l_kOAY z;tUvo0pnZ{*{Z_(#70erG*&0v2?I{vSQz}(?5y{n0Ox(O$uI7O!2bGOw{^ree6!($ zzRSVD@Yk@$q`0Nwt^qSRq~^)bt=EE@FK6ndyTgCzM4?!wcOlT*^%wC89LH{_>Ri`E ziqkIkid!?StW^JI@6LLL@=+N=dc-=LI98IEMf-nw>WCGX-PkhFHQjVrlE=-8&zpMd z!#6)aNnKOM7!q5ubV-t!Gu$!-%85)@)yi7^znNEw4IdZ##ZTMReG|=(R7}Y&u6{9Q z^Dy@O@y2kDq~=70K3whn3fWsB`h7OPhfo$=5FDYON$Y5kmujvgl68KV&-2Fv+|ZPt@x}L!QTz={V=Aj z{Pp$Un^VHTroo8He*JN=N(+tiG#h~22D_9A<0s5z8*K)`tW4II2qm(3Oi*@7Etykk z!&(#5z`~42>9LzI&Zfrpv)1`9m-P$9uD-A!B8v^A0LWJ3p~jz;CpX0}t&`$eY6u!1 zSj|2Lf`8<8zxi4I0uDU%%g=`$7pGwM>Mjn=*~Q*!ExrW!d-~;}i{S=xn&8^-VdU77GUbqx&dSFb`LJyZozBDgGrm3C#nTDY%YX91(|G8<`kVK;Y z+}CWOJfz2Yx`UscTi_O*FjJS|G^ajVt92Vi_jl ze?Qjy+JQO!@WTzAUsB2IF}HXh#BsdyY(0H6{OP}|-2xvh^u-6>a6Q{8Z#9&ern?0R zCr|&`!JE&7WGG(#)MSxtf&XqP(AfWZHN{AH1h32p>OP}=vu+Y zId%fi@XT;=59I?|s7_9PLe9UMM;Gfmq9z0UW(SgU+wl*J>_)}^ERZrcdPYx8bvXVF zRQ+r8{(Se%Io@D;e4l#nn3);*Q8=JAcCc3?*AsE;fBLEJ2GFX7Lg>#Ukk@W#pJl|~ 
z{D)OB?1oyIexuap-|>!Lv-}_Z>=ryF?`}B4P$O=CE*@7fex=ugnEdhoPc*G=nQ7Vb zC)4yMIr6cZHV6dj)E!v+Ltfw#%hFKkLbD_xt+YR&Jdr|Kr|)N24up1eo;^-g^a!h0 zkViogZ(LC_o;JG*f@_U32*El?m_neAo0R-%nZ(&^``!?MG#d%yp(>RG)bQh!(vnVNz_H$Sw#aFhDKj#}T z$+w0Vf>*qI#9x`pPkRZ>0P=eDs}>~Z^Ozs<+2a3onXmMOrw{7Bp7dS)33D1%LaCXg z_+&s;WmE8W$K=C5o(S~rFPm1dKf&Q+O1?~@loM;8a3m+}XD^2(kO#kc_&yBq_vdv-@zjMidOWo!w^D+M{^__n8TNy0pG3=OU?23>Kg9mcu&i^yE(;)j z!r(d4A0MAi@(Az#KM%lPE=Z@3a^Ao;bCeP-<;l?ehg{L9Bg+1rDb)4NV?EQ>M1LXG z#QuMvGk#rIO>ew=8ISMx-lng(T7Ur@U9m1(V?hf2KZ)r>BauWVt8Ayol9(yq*PC-( z|6peZFVQe_9^BbV`kbRQ*Yv^kFl!`7B=037Uw`i(KMUya_<>Ibm-@>^eDeO5{JjyUTMi2_hjUwwcR zVfGyKe&zApPK2^j&z{NW7fIm}u7j!-_(RbVFBVU`h6jgOb}K!n!>OUF!0#at=|3 z^%Tzs72fMvR^)$j*8lA35TQdOCr%aat({he_SgTILA@9Ifs&V3I!*g7u@bIYyDus}$38rv}q> z?{~ZhJfdKsjz4GDo6iynH{|-ncgy~qyFFtM)%h1fXDxb4USJp8}o`q^pxiDWJHc=156NDM%5&!a|m^Z$6si z#)vFI4gfCfexg}Z=avy?UhgKP$h0x)b9iP`**~xgf$yqpBl_rlkx;Pce!i*eAp4)Z zC0zld5lSE-18-S=IE3Z)3$#dfv^RhL{J)#G+$a=u^!SSz>3Ddb&x80Ry(LnN;bDfI zQD=r_Pb~#H-n=%C?LRKp_3PTT>E=W1BqDl7h`R?__f7qda23VjO1$w`A0uBHW)lnl zaeE0m9Tc>kHw=oxe>com0+p1`a$bT~q$=unYj2u7itGpLZ~^Ll#g&^QW0dNrB;I_A z7d!J(6u+WJfqzsgA^FvAg+gD9?L!b)DiICZp}W#=K@(bjeey-;{V`9uJv9 zj~81-`F9I)(|qwJ8jk{3>Q4i)C-#^tN{rTbhktxERM?zoRW9gM{q<8n?@u>*V*++1 z&worn0+^tQMzAX4-zL!T^GE6Sywd6RUNX#XE*w_MdTOF5ywBDxcu0(0{z^|*&sg41;+1HsBUF!^|!^!|=r_-gUG)9?~HjKU}XSTJ33v{A)yV zDg3^D{TMHB%-xMx;EVIWe8x#1JET3VCaN_&Sfhf{pOU1Tl>X~SfBuq$#;wtJSyup& z#MfcJN}}H$#`3V(aaqoD#TOZsWdbGSpYHzc*H_7AZ8J{FZ?it)`K1uH>RKxq1 zyJV%HZK`~0ND&dl`*jmviO1+`eeXt2q)`n%FXxHVHH9H1U^Jh5)Cx z-MjHSmcxA*EyHR;%8$I%B!9U%WvvAGm;V@bOBOeXY9=zVwlOEoFyMx`H#fSV|v1H#GY zD~Al7Fqq*3C_wv0Tc>H^SJRSf8L?fzVhyJe%C7MV=SGM0=3S=hDkY&bp zCF0GU4BtU{rxWkG5*F{eZ52OkVPvH-8S=~LN3Hz64Wq{+1|s4Si&Vsu>FM9<(WYDz zB1br4%Si?jB$gSmsihfy+2E%+vl!8_rp9x`)_+eTn|?@=7alOe8uWi%h~yWK-g2)S zapc|Yxq*${e^I;Q!R`6X<2b2&S@li%ebodB&EDkt{NJ;;iyB>gz4U>&hxEZbiyp{#?MFS z0UM#NkRdR2D&6{2&mK2(6-J#DPygiu{@d~oYH_3c@(Oexp1$mSiu22)x#%e8^CIZH zV)2)N8ms?gW_X|!w!zzL0kzG=PDNm_`cfen46kwD{yFU<`AsnbV^b+gREYWeMkc@m zwl#se?DT5@|Ne%Bop3Up{cNv+?Zz2ethE?tKv{~silQqId3&y~u zy{S-b3f#bHxSojwAwJ#qI%$wO*?DqG9AMy~a2{r%IF-{)+|{RaD#-HNq(7&gil=}t z+j_C+ll(SP2pV9d_aW^c|9vIa11&n@xNlX4N^^Rg3eBJQ6d!O3ES{kD>nUsoD#qJM z!I?3mh_TT~#1g}Q&1w?E==8?e-U50DkMzZ_48pdj-ocn}9xsm{-0_ghjd&g#oBxj6 z4qy`N3zGjs*PF*fy}tkBC6P*6$cZG!QV5mZWXl$^B$7SZ*RhV>Bt zgK4Qn!TaAgnG-L<7cKtJzB7O|?OpwA&98vV zn5@0{@z?oiI|r3r;od3g(^#TdU9$Ak|5K9OIih$y`*TjDr0yVtwDKT>jLu5~fiBN` zaevz<({j= zeLfa0->UJ0a*DN13pdi_-}adG*Qc?>zGyUW3&F}BKG>ot8@N*F(yuga3DkYhJ{gBJ(dO0}q z$}~l~<6u{yS$5e=HTYoS20AoKI>KQIDcaGs&Mr-?8Cj z&foMBzWUoVJRe-YcmZU8&~3NV#Id>9dzts<;r=1CrIQ8ok?Gxm@yOcXp2nBOJl!u!=jOf~u`@oai z?&!0Z3boR2cf4A?0k~t=*Avs%|FyJZLA$_)%+{AN{kAS!7vLMLlbnkofbj+yasX%f zRi7Xy$`lgUCYCxF5nfgPxN*jc}@H#`U583Y($Isf22;S&UZ1o`sUZo zAGvWJUHN#kY484KQ<`}zAybr}&wn$vcMA80a3%DbfSrO0VE25JmudgwI%AJOlL!0G z-`}QJ2JAlAXL#}^U|8BkM;^DZt@+!pyDTSq%C=0f@4Xw0b7~PPuFl0+IA(C-IB?<3 zL_E@Eo`1|?6EXDAH9GyvDu)|hH1;STe(WRH-8MEW^Z)RGOXu?$b=A+%UepI{&k|u` zBjV-1yw`PYX<%!L+L9{%b|00dz>-R!ipJNEmLzm^kX>z^8o$tq+)Jtthb5kyi~Ab|{q@US*X8WapFU;}WA+V(dT!QKa7WiS=`hC(=7zEz7sq9Mh!(SC zL8(knPg{!Ig<3~OZ{G=3O{N>m+gUOS zdLlvn+oJxuoPQ@T1m$P*r0~sN8-Qnb@^!ffL25i6(SIWF&XAZN#|H!dc6v8t?ku-KTD&--SoGwz)^i@FuwFdcp71lKJf^DCcayo zb2fzEbG*4YUd$z3%JNkXJqt&{LFgMA^37ixtzG^C;?Mt7L?3W_0w>*@s*aOgb|Rt~ z9$0E!zgZAKJ3g>7q|9zl@xR6!wEbQ&A_ri#{d;49ujE7luL4tc)YKEF7I_B6R0wrp zhpf3gns2bln3l@e+zleXg6F?FvLNl0Bg0t8Ypp!md4fK$`h$R~$t%A;5Wolx4taCX zt=Dr;jYU)fTS6o9{~)lLIC z!TUZuy0Xv+##=_$FP_#TMd$$h_1hO_xtzXwtFp>yzU(p5t&3uS6f>5-u7d?(ym~0zL_T zTtQuktnv>goLoZ)rR4FZBjT(v(C~~z|vy&;;1Iulv 
z(3a?epc_qjAq`FAc^#+x8|wTtv>od$@n!vNUvY40@nE+qi8nhF!kMM_p z^BShjt(DRH#~yO7_eS26l?A99<%-*U(hdDW`g5gzRFbtEbkvG0xp~8gcZ*?1_MU!B z-jAK;-Op1WIf}v)`H~lYo!PK^(0Z>EkBhAO=3=+VP=ZdG|K|re{=uEGij0m()j<-& z2i;T>z1YxsI_t_?1yZQLE>M0P)$}TxPMLRU(t>JpR&6x+)A|CTqjpqITgQ(O12?zm z78#{pRRMfGb`Gw=|1XO>MkUIlZ4^24cs zNUnJva4ji1C&IBQz^nA5A42PnEW3a?zD96~winX8w$%JqG)Q7qS^>&MZJEw7I zO?i7a@G+sQo_SUo`z+cFa&hXdkvF`Tr|t|@>BTJ;uYdb~^^do2iqQ=!E!Jp0VTJPK zPv3?m%2>qWkl?Nlxmd3b^ufs;8NVby4K4a>`MRAtJLr4kjUcU;pA_yBcY4$cIs}0d zk?K)$QP5X=6j?&}fgPQe@mst4JF+~vUhY*r*LN`Su%c%>jxAR_Iwb}kb2$XBvD64l zBApf40?cJd#tt`h{;yMryKo9T#dT7BLA`nKq_s@ntCdA=AziTGt*C*Tb)1lb@=+kS zX5k3>^!iZqX%cqf>3?G11b38Sz2J?FB7mei{9)ql<})a4`;MWA2!7|AgNDMcp>e?B z-E5RmG7V9d9|?{HF2vKpH6s*@bWPLt(l7_FQ^>>9zGlurPDR&0ejEsmb~Udg)Xhv8 zIS1zVyXZF{;!t7^s?*>wnQU;p6A=JvKE0hLuG0f7{)b!c?pCBp4M^*J8!_hH4MGHJ z8;%$2U{9xvlCP5ZQ}1Nf?=KA>kB--I`$1`yWS#Hp3tVJ7959PaVAPm={4d-8+tX%r zQj4r6$gH`5#$#UH7jv-Qx;ATK=4%ow+!l!ZwT<4?}~w z=^Yc@b1UOI$>ORpdqt3m**r}%E0x+q?7T&etMayN{QO+VTxr5$mxEccXQ=Q|u=v*+ z|MC>c7jK&rhmX^a7q||HzN&Wm;D4!3C=smOh}@g;xj1gN@dz(EoG6Zc45~$)&B;|) zi+>_Xjzad42Y3}P6#deQ=P7t0Gftg5+H9xE zCvMof0T-lf4JQQ#JLSuG_MZYVR5`Q4_)z~@*h;Nj>bVK6bQ#3htZTE+;nFrshSzbw z!{df+>@`T=iBHb&^fAy|T?7R=l?rIU#QPk~`zjxTWK4`$VZfkJ6ip0If2Ng1Eq29K z)d9xG+R5;GKC?Q$J%;^Ob)H?y$b+3Vp>^ftNa^Y|cMh^S!myKF@EWf`N-oNN`hns# zADe=G|bj zA#0C&CIwUE0y7NUp`!6W-6TtfoMoTj*5ZH;UCGm1Cn1K<6I#bX(XdRm0l4tf!(c9_ zAKF74PKh}?x4}W@lo@Wv?Oz)D^OtA-4F^rm=U2%4iBlo|tbf*U2HkcB)dbLj?D*8U zAP%RO!~{w;QiaPAX89DsrmgW}zIsU~a;LCXY55Lu@wKB|omItII_knz5!2R87sV=k zZ`@^#A#MTDB`4VO>zCS!5L}n=@%2s6st9MDP4d9_^9yG}-vydN$zy<|)UAJFkN<|_GqpVa;7@w$xp$}ioSwEuPlFqy{Dt?_%RE7sTvj=Gr#x410uHy+f1+IN zChG0Jmr;Ad-$GXO1-9Z_QDXW%;r99H{k`Z3=$k8_P4k*}ocM#@E9<$-7%KhyV&!j9 zdET{!$$K+@LVj2@(Kc<}*l5`?Y{R*mlg6$isxmnz>Q%9|5bXZr1sXNRL`g66w`9J0 z1ypHR27)HpGuHKR>4(k3C1-`c9u3<`vunP7ZyWZ^=WJz>THUeiVw zuI=qn+r=ea7*TGB8%ZH{hBNfVOH1zUB!inA;Yfq~pmX=OHctEWJ_J2fIVNzTCm@`4 z?1|)6Y@`r+3g+2bi3f46tByMzO;$2?;K3D3-z(_ zIVIb)hq{ZAygvWCyz;k+cq+=ZhS~*(pgK(4xP*vuyMYB)N&=Uu=mgAe)gj82TH}0J z%03<#=st!}?>i&7a*L9>?^QZzfwY##^uAKx;Qe{0D|Np#Fmc1p!(TlCrb$KlSMR~C zYg6?*P7##O9<^!)dMyHIlI{9~v=QT)f9JNp+}ia=K~k)6FYzUYZt@929p?3h=xsbE z&z$5f@9WeR?#~Wx0^Qgmah7=yCiv=;3T?^XwDh~wc_i44K53e9^NX#0XfhM!vG>@S zB5yi91^I!^pMiF2Cilyj7k2Qd1fyrwF!Xb(X@;JN(*!d1n;>x|(N5e>&Nz5vlW>_&P4B=9{vp`*e(7nkUCCBkUX#FOQd+HT z(UM1B)T}PAp4!Fq#QO^?#SQDKE)Q$y)NPAQldo9^J{ug^$fi(%ncSY>1RYJTE@IV+9Q!AVWr_a|3lnMBG#ZAfS z!Am&E*LZFPjJ!Rte2RZk?&=rOAnK5MPuwY&J_$oyxxcqD7o=LBM?me2R|$d|Vehe+ zQR;@FT!Y!AM_UIs)bC`jJZ4zqi)^fah)zOimdfNdQJ**tW%3~w5g-S#8zwyuNCV_7 zzWA~%&YAkC17akOllhvHDVwA6oEgF=a&l+Y3-Zd(Q^M_OXuc+GsU+@L2FE&QsAp{P zC%e0T9b&cfZzaX@p|C5phe7GrW(s|`Hl@62zuzjJs_Q#5&OTg`yHD607=#Z5rBCO{ z8c)AoB{W8`j&vAX)cIHV*H7+rdqJovW)vduG;YN;I*sMM9b$Dv@x)FkH{$!OvS)mC zPentsM@I(zL&BXO87ql1t>Z( z-3|L7xQFVpGsUSafo>#4wUto7W6bV&CG|r7?iGoP{{dG)wn0)v@i&4Ju<8p!wy^Ax zw#!}*9mhRsv!%D`g3{*0tPKxyVqjIxz)KhqN;7O@NIj}39#wu@+$LeX; zgrrNsy59BWNMg!XNXKWM^;BC)YsDL^aHK-*%)>;aqa%Zx4~st5BQ-_qWnW;t~@-ElA< zQLwd|zp-ciiUgP7*x8iYg&8D>T0#j^=_nh~axI72gYrj%HxIttWU1ZbsKr;l-nmU* zVrD1xxW?^k|GB{y>NTi8hPjHfxw^}%eJQYHYK<@TkkpXrw*IBcZR--I-9$*>CPa{a z26&5SW;gT^>3F%-{KUTz%I_dPwr)XMqRWcW?_MIZbl-aV`RRRYzNepaTzsbZK4`ZW zJsCeqA7Ec*miPRd$JqyV$jsaRFnNF07NlGw-Au~7ob@+!yW#3R$D{ln=sa6`RlRb2 z(Rfo4*0n&K2w4#aGu~;!jOgfegFP14q9-cSlSpb|sl*Dlygzaq4bzU@_>a@Mc`@Hz z-jDfZBz=1VL2>pqPF?Ez$&!%`M&8Sp z+8q#5{P9#i!}#P6SV9RMcO3r7T9hQuE_5~jJGY)W=>L5>*Ut-Na1cwSaa^J7UeC+GgNk{EPS^j38->TSNU$qRV+jgW>!$6Jny;- zCtX+%#z|dvcEG5<6zG0-^A6a9aL)Rujsr|hEMQ_=K-H?=L%w7bGrA$u3bRTpCaH_=AXEC#^3RW z3}gz5`fToZC{w+p3r2}V9D*gyCvy2J){SDbs&3BR7GSRy1z6BT~M!= 
z`NzD&-!KXAt8&tp9%u;5cQr!7>V%oY%n*~W*7P3)(M^YX_z+IBvV<3=pBnvypuT(V?A4f@uB}r&ODU!#Czi6vxy3kdf4#N-URB;H-=u=P9Q#C5kkNA8a{t#;1U+6Y z)Gv4cyX6qabaWr-o9b`x1B{ARow2;%&hC3Y_o}l{KRr-?;A^bv`5a}5SUdfm?XH|m zx-d-5Sq`NMSsv!2wRi*qatz%)8-u2-SvIU2u0p*16KR0^{VVtfia#kIyYHl+>TJ$;SH1<^pJ#;oj^U z_B?nnUm3nMfq5(n0ddsc(rnZ`J+UWO0uCSdG zq6*A&hD2|uD2kQ@?M7WW+4O~aUBN*=siUXo<}UeMgR00%23N7|Q@y^T$Ew^>N!+|V zF0f2LP^FWZqjcg(*oO^uDofzPGoD_0usx3AZus_UN%R5uQ&g|TC}(|zgYK>VoTMC& zJJ~~C%g#KPa>TcE>4#ICNb%=?cMtxyA@ZgZoWbv_#`KAv#^uT610V?nO|%(+fZsRuwr?f$DqipHa^o|GJ= z609_3Bl<%*Q1I(EEZ1l>`rP_HtqC2{BKxsoAY$ck-7{1zdpkCN*52M~uwAp#t_}2(=Ufg0s_^CpKM6f!t>AKoOGvKb{nrpP~nUI`@Zt z-k4Y`s7lTDz+SlIgUi5VDfl)PRUM`~Eyv?4z~U>={{tJg3%*l{bWq;VvYYBXGehfH ztH(-LCGxna%G%yULOfeO<&pVw@CQ%8gLYA6Mg{Pzc#|)0rNDb0S~~|;ZxeYNoGO@e z7?P}b+hKbk(G&X1_{VB(rAn#kb!z>Pmwd)ZjKKe#B|#&jmj-7NWxB``6-ocyVb`>p z+`2^xU975~H3k`~RM+At7Eeo|RMJwOQ8DM(gtFwuG^ z{ZjfQY=lheUBm%P=V(@TV;g^p*TPrS$5^Ku@7ElIcktb&H}um7F*MVa)0f&jct;y) zmMvPhzwvz=&&VESceuYQb+8nBeDf=9*W7?t!rO=yStvpC%6J~+RUv&g9>HCYG(mJ*}Y%{+41eK zK+}Jo+VSZWwd@d05_kP!~GT=2Y9()V}mR_?xHu z9lX&GFEHmI_exXs(9?MhUIk5T%@HPHE!%g3d@q$TSYF+&F4?To2>5SevXUj2dUk>z zPBZN(;wSJq)2Du5Kk`hs!y8le6fvjO2gHqtp#YX%WWcLc0sY{;^rF6`TVRcJ-b@z> zUgF1#XTz!tnCADio^-yosDF^MaOF{tED5axMkHJjn2NiGOBw$L!1i}Ec?X;gSUG8Y z0HRq>gXF*ZLd<%Opv!s-YO!EwP+PFhgb4kh}X- z%hJSbVL05FiN@h6o$ud@?fHp3k1|ga$tOqv{Z^@75v%MClwcjV3bWRbjFh4Zu(m{r>&g+LIwpqmh=S6K z`-pA|g`oFzoCa7WVUFYF-%JVZA2{a|Eg}l4Ku_yD3L4g1&Y<^7YZsDVomg;R=>8zw zckl7e>Nuy$Ro7QX&k--)K3-mD97q~U?*tfY`2qge7=Wu+8_rhw40tJVcE zXu=L4sb}I*IuFRg#|WB)F_fJr{@@$i-CIoW0zUA$(HbB&{}JH*)pNO=tavPl#Fb$_ zFnn!6qe$x8eVVtcQ*3^^Bc)hTJ8$H}ayQka#nU3r zf6CKO0?7}txwz$6qw`jkUzas>>(+aRn%&x>*P;ycBfeJ&*nktpNm1BT1T zZSE7C{kZbh>4z(s`1(a2>r8mXJ|p*9qdU4mNj{5NwFctg!P|#z#Z{&<$k)GffnQ-o zuJX+Acm{&P*%!Qz>5AvN_U|6mfGcj492Wr7-hd2hoXv`Gx zfsXT2RAJZvz@p({Nis_r=!Z#qzBs!&df$WEl|HR$ciG+#cjNu?t)#{e{eH~h&P_tq za%=u}jr*3J*IBDDl|p8et%c`%&ad~*?D6rjbjtV`Gy6DCsMQ|S*6!S@TVo*W&kc8p zuf+rTuQ>yP5dFa|pAMDxiJD7Xmg_KJ@r$hB`(@}4KeG>8y5H`cs0a8}o5Xb~+uhaW z&orv`fuf09>p1QBW4khw)>W2I&>!L77Sdk2b-#+kqAYGpT?-F$XtTga3lfUvev zmkH@NR~7->6eKAI+7k;DQXN4OIf)S`jLL~!XNL60v4ieMi8(xC`Irt9Qt|I$RA4u` z{=5yP__LOIu;Hif0D7k7L8JikPKPXIgQ41h=G@h%dBHo8_cq@gkzJ!BX`UB>Z1L3% zOIhzOo||yEZJ@AYctobK2S`p?4(C6DAOtCWopJn5?WQmjJQjFLFXYv4sz5UH-*}BU z7}AnW%?S9&(6Ta8l-=00qRR?&x%Q;rx9S#XutWDh19IIbs=`eXzHxuR&8i%ejx>YN zwSqN2sKB-5XD(dg8Nmlb+Fj?P)dhJ8)g(YabX|(F1im2eccTqxj`4A2WVcm2+uXP! 
za?kk^3taa6SFhvySt+!GkJi=H{UrjBHzB3PwbGa_T&XkY0Y+Cf3ht&LgE*VHX?Sth~=_f$<#7`s0 zZSM5F^l8ous(6h~`s z^G3;`!m5z204uzV;qIbYC47{Xc8YSG_`sPiH1;PEQP;0YSSKPP4oX)^;>F}^( zkBU+@`Kfmsp>5kWp#)*h)x))Yb64W$e~eBBC7<8@CKPyqKs@G4FV*6mt=urv$djg` zy10sjnGKirrt%*iUdd)DUWfx;xQ_QFVej=VcvfSP&&KplCbFf}_;rfwJjfObO-B#ZCwH{uPBGW|j|Ifdl{1nJ!pBTTX7BKosN zi;llea=W0{n#bl`CSGct-nO+aLbvpkAmqvjdEy^SlASBuRk> zhaXy_MXYOGAJa`ZDWML!)@rujU+im+0GwJ!S6q7|!g|8}StId_+tUR1?!fvI+j_8M zahu&2bT?MvGn@GYJSJ8Q$G!Ba>U9^d)59{;1fwdk>;YPAE6$DKKY7flVV0wb1ASF` zRQ(G?jOVmZwdv^O$PGucc-@03DA0*lFDibxcW@A?n!gl>e2aEcyKSrT4Hgy^@P+96 z{Y=D)0H<$tbiJAB((8EwOXSs2`BKsB5t|&PrcphWYsh2Un6&g8>4{|snpKl_3hfO- zf+7(u0 z%k6nvVkojJhabH^6sp4xv>*b8?bPaI^gHg!z;>}bGe2PIZ{DUYsVsz3zeD26!HhK) zEB8>-nXqNX%sy=V8I^Wn>p7ATGq{pGwKYg?+)YHg?b~jVdM`F0Qu4pN#C%~Z1)tsp z3Z8e{8>G!w@`}O`$K}&;K)O6vK1;exUab|IymvueKjEK1I_^SyS69q8ngig8L&#R48CD2M z&pCGVxNyq3h&zkp0B7w(eo}v}S1foTaXrvU z;I;XZw9YcWi1YCwd9nWdx8ezBE6`)b*hv(6%;<8pK&#_WMq?6{-Lkdh)Gh3!V+Hm# zieXh8^%(n2HttDZTULgg8!dYG=Rex?tse1`+#@ z@S(w9(*yr7PcA`iFlFb#iA|~6FAXUPikaEBYPAo5Y}-jx`g`~bvq4u!7>0@^mL2H( zPZC$VHc9ZA?C)*$vh};mxCokEEV!(plL{jHgvB zaiamz3GC%;H@7R|Y?CD5+cr_Qpkal-x(`QH-i%v8fXJVsUM&zIPmQa*9Zrku?tWOi zR)@9kJIOt*b`H?h9(emwPT~A6Kpl=iT?IjktBpUrezQj|OmVL3fIKN^-#8Nf@=9!M z&e4aNqNR@7$Z}51{l|RNkZC}!01i^%pu3-1abT=Vv&tr;dyCJFX#uuvUeYOK1 ztDxZ7sKiI%+ZZK}^{W*nE~Yhs&zhXY#aD1bg*FsNxV$^qd1XvKt2>3(!@1gl%}jdB zYu8#~0Qgne6V_pjiO{?OEBemRZq~>t;{^3d7-1_w-uF@|_*65|(tn|Qk!!3Wm#)A1 z*4ZA17wD7A*$baIHQzmXonB=5F+uD<4Srx3)u`1p7oT)27dAd(PDAbi1S;9ecL&cS zE{l8VXLHUtjG0EhomyPE4VIu{Nj&W8D=nYi^O8y77G4yL%`HN9R5=Ei6h8R(pxhGQ znXv?(QqE+Dr;YcN4zE=!6m7CSPTQFsNhNjgjel<2pD*H#VOV2k_i8VQDC|1lhPUF{jbVP9_5b=jcK!)|ydiY@|poZ9U?^awXJs1`1y z#8$-5D&`n<^33HiAAp)a7kGMm*MHX~CYQHqMZDF+lL;BlmX~Vt~ zdmO8N?#6gD!inq)2Iyz1y5uh@u8ddYd6^RLc?Lr&-c%6ISH z%>m{v9ucqU)t-K`y$>kVS1f9YHs3iguG20LL=$nsoMFjX=?Q=yim(0;XhfX}adBZ` zqA&59xW|&VYu_kaY<0@kM`o$Be=#ADqS7n8_M(2c%EAN+B_B z0oPDfPCz16H)gYN7`TfiB@h}L4|a^m?;&fGc2MT+z7q!;LIt{W_3=xKg*oo!!26+< zH!niGdQKswxOM*YgQz}LAXj52X?CKb4tN!|Xv5F+Yl_9v{&5D8Ae> zv;oSXdM+nL3=&R?xEx^zRDmEy#na3$@a8Rsrb@*HSZ_Qm-nk+V`C;0~YP9dRzbD!4 z@3%)sj_r-mIv`cr35JqU&p|@{1FsJc;q2C)xHarXhl>EG!MMXlhf zF@iuE!B?dvZhL;8R(3atezEOIzueXdhI`k8X&EA>fM;NuseIxXe|OzeBYIaw?PHH< z_^cxO59briI(-&XZ}{!3x`s~d@#cN}^GF)17R0J3s?!^%a?R&!gbwZ;XlKk~ema!B zs(CcQ)K@idgX9fFG<_sb)m_bIuzTT)HKaU0r-Kj`_ zYxtcXO4&-EpLr)FScL^L^o>by%{0y4vKEv@PX!R>j)Xe4E>hThCjr&C1&l6t7a&Yt zhRm7vf(~q9ZAhK`uvh?GpiCA&x<~A9r5|n=?y0OCPT>P>sxfX|0%*#SIELRS%K~!o zDjrlo`~HgX_do^_#%o3*xA%+84%ChW&+|e|T5(W$BhGRLFt!_X#j46E5StQRkM)kVV&4ZJ0DO7N}e` zofNtHG3)W-+S@7HvG<8hHr0v{o0N`M+eF5mccV@&O}2j}R29yPemzjHyAwXodxr}H zj|a5M4Ow68c67eKW$=2d`xadyA;SQP4>Q>T57k*qkKG#D-WRBk@g!gvZ50164*&I4 z(8|Spq&#`${@lG&*IneU3(4Nn@^^WrSdp#b5!)4s_sgJC{|CAO;;aBOfF3W=FJV#m z87BG;Q$QhXv*nBs;~8(+t!`nl3^u&P)Tba@5CV0RXygB?zig4IdD?oaZ$2KB`-sqUzAz@*D%??gpGhg3s;-QD2Oh6U} zrVw2H8TilHefLjv+Gl6mkkyr>3AINoWRy=~0vPYv@N&0#HP@i6=S(w>{g2wzeZ>G> z+_Yj-grUc)y+XbTpC7;?x233d`?4320W5A>IsSmUqHB66Xa|59LY_?!iT(&az!i_Kk95n4A`9xb^FGoA3OG*l4eVL9G8&+M)x19 zId?XTO$WZTXrKZ^;>((rD%_PKH$vf}?FW(2{Z?E=a#0@SkAVwom0K!4@32vsZOdLp zvh^%3$m1Fwa6bV+sUGq<0}-)adfY=_;wn5X&Y+*dO|Wk z=Pppe@LN5Sj$2vUu?j$Tl56>veaDUlte+mg#B)vzY;Njc@oDq&rSizcvH))w-3lJ% z@JSjXZ7*p98wXb`MJVo(07e9iy8pITu0D+ z23>ywTnl?L#d>1)>nq$}<3InlASLR;H8VV)o9NZxV^p4|*Et200^QNKJ06@-Lqari zRs4k2-uP7V{QLc6zFN?ctYj1kKn3%Ai|Is7&y|8aU(PQ_3<)UH!IvR&o?PzGcGKVu zzzgQkdpbm+Uq29aJlyOOdKd9!aA9kd;-X4TZhU1>Cq18S|GQ*RlANCrR=w<1?PK`<*_ zkNKWHJH1~$<^sJ>eFoxM5obABMGNUd0^u8GlPecpQbpkvFJ&!ZebYHHn&r*DY$q`X zpZ0+WaWSAm&LCC2A%;<4S0#!a!0uCpi=*U0aj@+`R87baVd0(!WlBoH_%(4|@!E(> 
z6SZ=9t;Kl(Od#25n1dpoaF4P(`zYNnpsVH01y0M3XztB7KC>a@No@X(zKLfX{ego8 zOI9S{e(+=Fp=><941r!a?`#bBK@V~U&G%eCjG{thV3QgSj39url$s(}aybXFi}y@7 zpb5{L9GDJ_zkwKz=4FG_&Mdu|R^nXJuM?ohr5pk;{KPZKlLxQ6Y9;?9d#0oS6o44* zD$N07c2AxVZd`W{fB!d%m1{ubPIsz zTzvDnuGB1Pa`tL+5o{v`(^C>&|C-Jd<;yt!AXVfnj$y)K%&7W?p@`fv!zSeit?V0? zN@JCS(Xwh9F3-Ua%ZIydvJ(r;0)|K2;hUPd<_64R(%bzCR_i~Svp63*47^zFO$FsD zy=%Fk;K^tMJ(6&|af#<)S&E2vQBeO;x>btJ-#yB<-d~j*gubaRCU|;S>w7TnrGOj9 zOD3Xh^=NRSHHE3?jR#)f0sedrpiTgUS7~JLSt(L&gn!9A5VXTt2otZXOIs<{3BIbI;G23e@ zDivPoMHiFvhpp+cB-s>FYCm4Jj!wyLyxi*DRY2ZE%=qKd+g^#;vt%FVVvd_;1IMbi zu%jg@M-|hV726vZ5L(R6qng(#83;(?Ix}uNgN+()`-W15a^}DMm579O6OkCr@&wVZ zxG;!I|GAUWzT0IfK&pMDM!SX}n%0-5GKkcaOnb5MnquqET?CZx8i^@caRhh#&N6AP zBgWGj^DrabM1R{ymN@Bds>00HY!XahIfcw>(wnE7z_ycE!n~9~gqkemkC!`j8nWaG zyLKjM8#|(!GeV*TSE$`?%CG}H1bR(GpP|0@heTu0ql5KOS@{EMrPd6f zv$H8?=K77vbZp3};m6A}jyEB|8)ZlgO~vd;qLA!xWYiYW5-}c9O@$d$ z@Yv5m=#8;P$Bg9~vu=D#A z7RLiPKCI-4W59IF>qjW~>?@2SUInCBqwS+R4-5?)n=-ow=&%8Eo+yG70}DkOKtu;4 zYR01xR4nI4AFWutqj~GeSFq&nj^QVqN%~?KMi*!|Hhy^3GsnFtG)=vAmSv7eTXe9f zx&RobvsFANkL9BBZ6rkG8Seg0smXzc4}f71{S7ig^b-J-1zKi95cohHOMep}oIYuu z)$FJ5q3FR@VDb*~eS;bIV-}jCsH_~a1Pt(36cH*aDsU-u`)zUXVh(rcfcUwZtn4c6 zVyj6om&GHT9Wi(wDX` z1#`G6fk?KC*PsItk4cl0?Uc1@AKXMG8bt+fgh;t@_<1#$1~;;`npB&%rXTJDvSg?A zwHCPpkbGOUo7H`Hk38Xv3;oZUu4tHeD0<5MPsQVBqIKcAE&GWUzW;HXBF4wrc+pI3 zyd*#Z#;XWD$?iPz%tcc(f=#{_5chb*;YcrdB=82wabbI8c!S>Y`(-w}Nm;(i2>4Ya zou2#{=dakY0E#EHeb@s1SeNkGF8*HG?+62pEncP2ie)z?4KYWSr1>9p8*?U^95 z_zF?3I)M>>lwtAMph z52(7bT6O!{622^BaR8qQv0hI&AQQ>@PB&=9ll_JxB=`eua1UeR;+XW*VM@WXhNgjg zR%B4&BBsXGLf7UrqPp|)K^jS=19<^w@`!=H)`9kLWqCib|OJET=lOL7Do==+n(?6o#>sosve z-m2oyA;bUYDFd#lhU(kl>+8;Eb{Xb_wF#94~-hjqUd0|WzG=vPw6+IYj=oMsu zoHvWpwLa^3Cs4zfP_15G`pRtTow<}x5Zqm1Fn35zllEIFzOSUh6RGb(jZMA@xCQl2 zyMyeedKz^Wy87}o03k84$(yBHm4`ud2pkF_U!9(`HnJFwUiU12hv719H|xtwSD~M7 z3xUs?yo;bVHB1t7N@l~M!tG5BO{#@<8b|&#UB)y)8+sr{R*g;EVul@TpTU>f|uKdS8>0^k$`DxoZ;nLPqkMySh(4KP|eiI zno5|#zMaGo#I6lJesbCfXJu7C>R zgm@6#cA&)>1;kqVMikSQYyfH7^_^8OFf}i9q-G8C?fUE#&j=SXBF}XULS&MfO&ZU_Ts{3RIKS$g=T+aKeK@X@5>FPdx zj2d{hDo{Q7JUEYK2I&$AILs`~4+#m(m8J>$rFAQOpk$9?2R-y-t9xE+le{-lJ#=(m zjGGkE1bcc=#^h>HW)qVb-s|Gx)n5yxq`dSP=}YR5mC1g@$?8j76%;FMfI_pnfczdh zpU|{#^?8NG))|;P(fu)Qriy9!A;cZPu(`E$Fp?E-A#1Y+T7l_K02?P{(DgYxw@9Hu z{PdFhfLs7TJeCVII?&4m@fo1U|KX_o0YL!>hnIu^GSoYNSX>rW6A!J_}2T{&<21qd>poW6jb_oFnQg(NozTNlEo6z6DG=bC1k zgr>a(Rv#eQXt)qt*$mth`M~XL_Kpns;MF3L^%R~9);J32bv zN_jO%F-?iN%{|xyie#(PzJWOHAIf-$(+VMu*f{x_QhO+Ho@CPBkBIP1i;J@=I*O8; zK-p*#`o<)Jt-w@;FOn_Qm>&?x$shadtZ=unskFqh#{%;#!yJFd;(Kh74;u#>2<;+1 z0f9ivDgyc4z$SHG0DFHG9~!c05j_5gHr7H(C4>mooRQ{q(u8%K#ECAIVsepY&?(Rm zfZP!CowvF_N)Cbi_jfnY2=7kl@BISvYeKPLRDWOV*{Eu=XpDJjpG*mhyU}q>7QUH^E4ynjJX2B zZ|ygK#y_0lI^C{9Hp|7SYH&IjJe?IGRhk`)g1tdyoyHnbzQOf z$N>-s*070=7dc)w{$Z$Myb~&XV6LlgrZ6@U5poFR0SOKkK1-RQbG;xuoYTD?JLDvF z+RHPp)0?D1haC7O%2gWlgt;7;Cos@$ml*J40n{M$^<>3KqrqdFKo#)2v)Y8O<4t0y z?0)9K`?^b(Ha1;AA#&{r|2;E6z1wlDtQkbHktHVuD7-i`j*Yk?Z0$`xIAc-|A;3D_ z`=psY!|NWOK<7&8Es$9~H@Ql`23`}r$quXeutV6o0D4LK5_BS4YOVoL%6`N-k?z*X z`$*%v447ziNw1?Am@b+k3k+dkia-YM!*Z;8h6j&8!f(VIu@OB8c50c*XW(poi(lznyzi!Eq(MNTR3w->gsnI7a}MJO{`S z+Fq3Xo2~2!#nS6PTk6k5_c-K>i$#4*NU%{UL`LOnTC_z}%4g@M9u8=_l;Vs{T|89;CQ+a3{1+*1F?(=p~^0 z8*N9~0lA3FbMFmjMAX_;y`PhPDAmF0O8chNU`qX-jQR8`4U_wGCB1q@}U8eM*9+7Qn4w0~gvfDQCq zB?4J3QG>BeZL71kwpLCZIec;wnny3)7A}rnIvB;uv&14b71zXlzPvbIC2)|6#Ha92 zbSwc1pRPq&7_WfVP^h9GPBAVzdS*n`n3$Nc-Wf8J7-LOFW{gzNw7>b_y> zmxI$p0@X*;diB;=PUZJez}SVM$g7g$R?zEvKy{pvKEXi}0He5@9gtjGlWMy}PhgtO ztxuX*V*=TJ4!x$D*v zke=MQ*~y=#uV?D7BrOVGWthDN88deSP8oH66(}x&RSrwrJtNs=O*JK^slmXzltdA` zXy%X|Sxgw*Z1j#))_%edP 
z&SIhbwxVWof1Xx<#jfWrQ3~-~9ljCAmyNzxUY^^9)eWOoFN~o4CVf4O zscb-gqkMiRCA}yK@J{ZD*8rjT?y7P40j1uU5(vIwsr%}D2B%xht3DhPf%x_sk7Wsp z8hqxCc|P@u=LLEJf|%|{+lm6{+_XT;>etq;I|p%EzrScagt|Kb35!cUoLw^@pSO^E zqhaC{deU^-h?uTs^gdtvQr8#WT@0nNN|cXd3%+o9ae9dWxT}k3!^vtwt6?~F(h%Hn zB|^<(^?4(annf)NS)1SODR}!Ihm?N5_4`PVZzsdw*Z5!HsyPm7OS?G=u!EBY*>8FP zT(0FBXc9A5^lcCTy7upnFCK@mUnBw8;PJ3(H>*AxagFB8 z$GPunySRW=5h1jdn|p;O-W@ymMPN7ga9qAR0TL*RrB~?t#8>ExS(y`Opi@v`@pXA- z=FD~?HHrozQta+)1sJtb`Hi{Sata_O=%@lZWf=+X)vT=kiu%SlM81RO~5rl(d{H6QX)~RsMGYN^RclFphF-3ucn?6Lb2{A!~E)VK>JzM zWd0+U0vLfd8NY)awD}4~dR4ULSav;l(-#2=m$&8ti<9&-fw#|{_E*cZo!RO^+s2B+FDKrv z74G)>(px8_yJl&=rJ0r6@OEQYdZ#P&xfp+)@vXsYAQ!wd%YZsIEiV6M^y}9QPoPw$ z#j>*_!`qdkozJ@{2&=6DWverzR8`rv@8ts_Gg+xwBjPhN>&@unVv|8}@?jX=F6I#s z^~lw`?`F&Z-P!cs0CBSF@llH0FCA-kzGdGAf-`v=x)UK2G4NisLS-+3HhAjw%YsU@0-MwL|Qqf!hCntcTxk1)ksseRX~v`&((z?<32iKQ|x@ zlS||$_Czc1qrjs-3R_-c+s#z;ELiHkX-jBjVq*FnMh^7Q+nu#P(|@&xY)wEGaT49_ z0ScnbyTU0mUY;M!WrkDyGL67l6Jildpm+94a)#P0P&?1!-S9DfbYz6Z+uMy=hd}v? zmD|Vn{QMli<)pI8%E~k5i23?S-y$BF?>E6v8PEy+#p8Q{ZKa8XMw4lKK<%jc-dyd% zbFzC53k|i^MYzBWN1?AS0Ra~e3Xo7^HFpaZKq=~FLvcENh zBi>izv|{%{ByuG`BYfr@hM%7N=mEeqb17GP9$J8+8r@YkfD)BFN?%mi=J`?lX89<; z*arD+6HxHk_T`u3N0h@x&8J=qO36aO;a^jC)ChsP7ucq5MjBa#W=??SSaUXw$&79M3sxJ) z1PU;|MNvAj^B(0@pkvPW(%CW9tzQu@0u@Ti8SXXvq5Azl6gPi+i$502LyWvwfw))y zv^Kxi-B#KQb14~cb)WphWQp+s*6r_uUO+1)KC5dWFEtf<1w^D~0B}0ly(hHgZ6^pM zOvoj=jS!&6O{KoaPKMHGFb$+p>g)Uo|UtsR#XnaeqZ70H7rldy;LT zrKL4y1xxil2=+#Hkd}EMO>}{RX9iUg1g>)mep!2tTKu^T%fQ!ZiJJ>}&uj7KS zCy-Oz*)_MY=mmhIPG+?iJQ>^z!0BCH0Ln-L9jDbZC);`matZuYWW<3YS>U84jy=z4 z_Wg%F(~QiNpu{}+rLpu(85_MY11iSCH4JZGoAX=C2mj5y*n|S9o|yysng@SbpR|v^ z#J%=!Egt>)&i|oFP5dxub?-~km(=Ja(v-56ixDzF8Iwa%=`w{!==>fIkBtGK#jGju z%e%+CE7a~_LFvcj*YG&Cdk^FgJd@2DFV?I((xu2$m3;r7EX0_F z-`G}@z$xCwxR24m-x9XJm)GynnC#6%=jAKv2Ik8YZ<*HFDtcGkNRfIo#0s9w0PCRa z%IH9JXY?S+bH^;^%CGw#?Fy<4_@7^p(XD`@_XjA;5T(#Qbi@Kla3xEA%t%%ukKpy) z9s#oq(02>?`0;t3fBikgEFG3S35l&>gQH`qy_@?hO(=q8$k$_=q$ly*`uWIHxUrEP zIr-MR7g(aVV(VBB6A63zSQjMd4ZO+qfzmn~to(~hrb;g5f)Q7zGn?HM>x6gOXeEd?B8*p3%#dZ|`-lB-&_b%7e&P@Gtb);>Nh z%AYN6JG#Fap!M~<$JD9xM?+-%pfBe$OdqHoR zy3@2m5~-4V=85rAmReBjmyp@(Z9YJ(RrBBr$7g?rQ2H`UZRCYTU&`e*N8Y@bO#Wl* zQpR-)eNDBXK%n+k7!U#fVmdTYS_Doij7P42miMnxb-X9>UHtDmi_Pf z`M<67ube~f?5~{>H?+R&m?pnDp`LKPr3@U$c9|ppnTQO=*|;{M^{gGf#62*-4s?{~ z_CFcsF#o;_LBSr*loaU{e!`KK%QRJvj8QUVsBkM^Z94r3h~plc93NOH5K+jFiI`jnV zZX5@PFAR|Wb326nWIC}AMX=69iK3rb`}R>r`Hb>*Yyr<(AJ_?Uq}_?fo~3ugo+i`x z7uB}RQrQ6&6}PGA{eG@T9WIY9xL2!`xT*q=@f;=K11PhqBx3*uMH2Ku^`Fz4iwEH7 z?KbsQs(;@3H;d^z)xw*_gAg4>hcIS#a!U@W)+Cth2v8c(h~RcWn^|sKZH#Z%@k5Q$&p| zTrpHCLuI!?F95x7$T3TOnpWu~ZA%)DAUvu@@mdUGd6Q8(@(Rh{#8EoG(K%0vP4mLv z1+)5<|Mrq|AjKWQCpEXl7SCRNe)S#;2S*~Pz8=E2&2>_&ow#S8I6pnV+24!s5lt@g zVlX`3O-YeGDA2|sHBgc?_E8{uu-$u$oSj5*!=_zSoqSNaUA1dNH)smVgT)s?6O;I= zMRA+CBx^?(Ziu&9zCiolfL$*t2l2wWvZsu~T0smn)#^^> zpR=Eht~Vuh4sIK5$qo@>hSu5KjR=ubv^>wR!_oeHBI0W?O>~0j zt(JKZbB$?%gU1773Qw<7ZJ!2Psm-K#ldg!}D9zqigl4)MDtx+4(00g7d|XjgL`-gv zC97KM4j$#4GDCT`s*JbRdr);Kq%N95G!><^)?wQq@Wf&4>sRlX8*(M8yw7kI^cy>DS>95wBU&e&& zafqxo#56dEw}(UNbVML1N`!H(hILVjVShVBxX-H;TMYKn3A!q>P#T5-s=^N*WQFPb zhR-bv9y}mVM-QlhDrLHxsvP-ZJ-qN#d{F46Q+zSyrKDE}lUQP9^Tcwh6i%wVMezgD~)K-2B(e${Ujf{pDw8*Ddbbe_i|g z=0691LNA}te{hzLB{lG9QEf2iQ$6P z85Lrd+x#a}xp7wHEU!7XLCecb$b{J-x`D3NZ&Slh626!S5wtAy3Pf>#34E|KN(}s( zT&sM{tYcIvu^nYi@~TXrq)v-`Fk*blT%{&x1M`15%rm15z6}W9rkp+X%#6 zj0_$^2N={A^9vsp3?^48+s_Q3-`9gx(4op-Mc=bM)BYrx-BT_00_BlF)}X;` z3r|B})6kHeWQEW3+P2n;={k#%9cvK9!FsmHE_Vgz&VTf!c2(qxb6RO~$zRMoj+B|f z$OpFr38{S;iuG$fXO-uuw?K+FrM7Iw<3@}xKewKH#+;%&M)ppY6dW@q?qk1syBCd@ 
z@5vB(aEe@|6jkd<7<_(a7Y`y%K#V*r9qsPYI0|?KDeds*Dt#?g+InhCS9}&OU$c0Z z#?G;@roj7w15B$W!^H+A3xA{Euep ze48v6OSPr0i$SK-NZ0vYBJhP^_Ff1Sm2g*#`%tlA%OXTr(|a`B%!dYf6Tp0zR82bh9^F_+ZkpRg(O2*cS@FNY|JdV9h(Btw`EjpJYV5~%WT zu0tfumREz^GFk8mzWa1h6c=oLf8pJ8Pe8nISSO9F2PIG1jfL0gOmcfd!peAiP?=p6 zS4zufVg31slRXk#IjU%afpJ?YxuDV#nV5)g9{P^o$buKOs$@d(@ z1oL?FZj@QV0EW#)Q?NHxEl?7Njpwt~*mJK1f$vM(hnxMRJ)M?j+KsOc%3P+dgMYlc z9f*4n9B@g$K;V=U$7wWjY#bP$yF!F$PwLu>&9EOQegDBQweYPk2W8!*7l9j8S|f86 z?9nquomC^ayE&OT^RhGF6p{9(J(TsV$u>-~6lJ*nu)aZp3ytO?lg>Ilp*C?1?cPdc zf-lXoH-i;o@jw`f6T$FHcey$(Ho0>A0%;pCJ!--46??!5x{{T>Ymc;kOGo$hE(K#U%gCz!Fj*;~a5;Y%O zT`DexIZumGhL>#~#j) zg#Nnr_s#zpjA>)+4NMIGbV>F4dcY|9%Fj();3@5QDp^+41|^FJd@WbvWX9>2gYYaC zE_BoU`T(cYWUqNIPsOZwH_dE? z9X;qEZ7(KPdCaEK-aqiXq(tcQq#%QEn*4VB^?K-P^;G2#mBInbLu21Ewa7-U;Kc#^ zm{Wz-!L;O4xyP^E29{o^G=QiIRCtPy`e=v!tBr_;XQ@N-MXOh2hL&izlV!J92+fF!_`5^fEa=cXRXqWMWJ3~7339TPK-u}nl*6yk7wNA zIlcsENvj|)MhEuzQhdy$4#j+ zC3J+_Bno}biY`Y)p-=VsH?@OCV5hJsa`DwI%+f3xp_2p(!|0$mO~qG#6KAt(EIS7 z&&7ht1kJUt(|6vlkrHPZYj3+xAi~-z1)Qs(=NSilf$Hus)={j2#l`MU;XSLuvz(rG z1Y@!wMcsG4Hr0br=(*8$)TR!1>B?hl$~n^WWXA;6qi`MHstWBj1SzX32yqhm7Aa7> zExSz2{~2+n{vl`{CDL(PY1XM>|3y>oGMwC0x7pR#EWHuBWYEW_1)~Wt8GgmY-&H0G zGK??mrOng-sc5aSkG6GYe8gZ3A6VuOs=j_bTH^5>BXp{=v!3>mkDRAUOn~zP=IOHX zGj`&s5-kLwAYySz$QW5HCV{$Y`P6vpV^j`oF{qwsAJk#<1!G?F-s9`#bD@ZGS?*DB zTzjkq=5#Z?^9&r2==IEYN5*yC0cz$vCFmGpSbN9)xZo&)``3KhG4YcEI#mpL24oCJV$o11T5+N@gxPU0bsz z@u}_3IYneUnzyjh*!JY0hlHlw@thu~ns+a+vde1JyVRCz61$yrVg`p3aYa3%Lz*vJ zHLCYlPMP-3qJt%lV7Us|w|&|o7zSU^*lkaQ`p;IYZH{0$r~8f5zV`1K(xAmU;R6w@Wh8$bHbxfb62_nupMx4nCrUkXkIV;NR*uIU^Vw-;bq&JWl>!!Xt$Cs z%l4o)d@N@f+}@!&avw|}Jrx&<+g?P566{Xge-!w4HT5XR?@nVyw00;XOIXi6V06!qwbc%z3YV z_~-$rA?Dnq7VV&xOxRNFAH`sfkPTrzLl?ZCVVR}ui2^M)U7 z7{<1?ZGmL$x+M>?ZZ_aE6dTEn&%sXM?}IGzQQE0_gDkr5F#qkuwhL~&Yd~)^12qH+ zO}v%cFB*&HZq;9CUUWVFZL(hcP`E*`+pn;A^ZyU3>6Bkcd{U}KJYU|ZA0`WJl7|n_ zlPsCRjTXNJp6y`?TC`lx^Pn3IgnhXN{z*3K3dW^Y$<1yUvIxZ+q+)?B3u#sB4vYGB-Q6Gs+x;?(B*=S+1{R8MfF#*#XFb>wX*mj!txU z3TrLMjUhtnLm|%*r=biH7xX0d;4Yv&hnON%)I6(ib-tO5Z+ZD8c>A%!$(tz#Ke}Q4 z#^{G#^K3BL@NZ_7q$B|@5~wjo<4S|EPSEIvdx47nL>Q*@C&0fY5DS4{w~-m9kk(oE zp9$}qJj!$;H~OBNFvSoAE_hDHS;Q5xQ~Q8(2zj(&|6K{~rb#-(c(v<$ULP!ZV#n41 zw=IM9`h%);Yq!4@bBHu|!u|a^mPOu7LgtHnw^!f$zx4atw7sCDeT^pV65o2GH4-ZJjeUro|OO)qj$c=(F8UO(jD|vk>bF2|CIhx%%+9oCNEM!C8wJl|zj1@P) zeck}=@h&J*5Je1`2GJesJ;p`&bfvu3l$c>Ejw7~nz70lvdl>HTKr=L9Gy*!al54u6 z&VJv~#9KMz;Tzsw{(98}{-`x8%ULx5nNUO$uQp_J0R=eAvU57jVgb;iXA6NhS3~)o!tkSZllp?(v>o)V@H9xIT3aZa? 
z>$YzZx|Nu`#Az;m$Wha<17V(-ml=D|OIYPw<$v=oWiLnb)RG3WA~h&8Tt67@nRDAn zI88*28j?N&uGyp|3q;fmCm48^xi&p|--0S=FJht{X&ly}FXpXnYxNhaelrSX~iax4dY0b8Z(D|FMl=x6*=WrYlTTQD1>#*vQuRy-I4| z8D&>J4^w9-(YX&5|N6ao0VnJb|Cxfs`(Y(eW?}7Qxlm}{3tU33+zz6 zKOv%>?B#3ktvq#XR9yh8JN@=r;&-T`QjHyR&7STMhMW2SHx)PW@iCLchx%HZfu^x} z8wC-P?fNv%q|0gfI!ti<#j^#emZ>4p&2f?LQp zU#7266v37)-Ms_9!jp&Mvn9x=RZ6~AHHAT907bCcfT+L*Hb>ywx&zuR^=;!E z@&mKaQdu|x#%6d(2cbRrQ4YLKCfx- zu%P$D1pK^7Ozg87SS1okA<+LoXmB8IYLgVts`u#f9ZkbiGkABB%1iPN;>T?_1tKr;obj>fV z9Epchu=ey^=%2>!e$8~#`{r3H{(41^<>G>{5BrQnk;N665hT+I$`Ke%{OV4Fec6GL z;`Q2X>%0>A*9LFNd{5*5_Xht#z;Gk|il8GR^ml;r_sxF{Ubtd5h+B2u!POPy;=@H; z$&;wO?V8vS!mm6q^7bG3upb=&<$cbI6be1Zn%nJ-jAyO_WCobxJ&L26qq3m33Mc zc)H}2Uf9(_j(Bl)V_yayI!h9r!d6>E7p~rC#vh7fESR_3NZ-w}*BqpXY}#J|yY-*d zf2J#`2Z6P(e38ixJOx2#krK$F@8O;6*$wsfabLfydus={nnzxYX$tv2URcW7=##A@ z_<(z`cG)@pG}IpgCWNVXtYX|L_3#fkFo+22J3oELT3&gC1>PVP+t0XzNL!L4ov=je zGnDvN3QPoDR%>xktl9=X*sDgNtM|S+7t4QYq;<*W2R<@uzVVk{S`k}m;9+lzygBv_ z?fAo>k8t|~Qf7bQ*cXQMH`09!8(1F^{&nf^d;c+zc=Cf*1+dPmEt)#Q>&A^qsDiji zC{ma})6Gz#g;ZLX^9(^A<9vOF-6##O3d!w>^uJZ0Eivg=rS(i^DMOsub*!$^+A`XL zwxSvTj+O}ZPbX>hwHRt!13;m8_I#DE0yunGkLpuLWvh zjA%UgTaG1l?U^547husW7)pmyIuLp;;B5vHB7yddu)SsQuyt$w`%G-#kwRVe@c_>-_cqM$Y==6gj%+39HRwb4HC+=@|Ya*Fu z0o{%mXBxu$|39;LFDkJGqSS*j?=c$rie!nL^6`3}%FESQC4E}88K;qcQy5&zlp>i{ z%40faYYH2_JC>}q41WikN@n-e4porzIndM;$ZN!w9$VY!a2vKc_|xtV?LGhCuFpC4 zwv&-8aAf~bRl>3u+kgHvKf`viwyezrebL$exUEARnDHd*%@{7UWK_mBi|~yG*1NbX zzZjx`2RjAg^4s|h@9#+766!6Jui3C1K0DoK(mzE3B%gOkl zjP~oWb1(Ed2x^HK4(d`ofEb6*U5w&edYM$l0_p^ze-l~f#!(@rXkP< zlcJ)cn#A=W#J9oVn@)W9!qRaRH?QGnO@eIclZ_5T<+%@sCW6bKUWgk2Pm?G?rz6&)1cZvF!{482_G8$G2=8 zrnc0Yo_}DQq=tFZxAZzd#P|g3hep0pYS-tVK7|`a2fJuMR!mq20?Y7vu@AFw;a}@c!+502 z;d1(B?->8m0vPxdmtD8>`ld^J&`tsgg$R(8SU1V>XL1jFUd6o@z%qVM*p! z2g^Q=H}d#o7u!L+S7h!F$%=%|v&7#Ek~xz)yhr8W8a(g!p&Q-7n7evv{L_4g;OE(6 z7x0dp1A$v=#|4&SvkU{bFZUSdWdnIq7?(b2Dom&OPoB&%(;NS}_3!D=r3EOL0_=O^ zFaHPL{0%>UIAEX|a6i;+JB^wz57xZ{euh@7ooi;nw1^*X#P@l}H7MpvD|=Oa$CV^B zY)Qsk%oGbf1F2s1fJMH$b+EB#c9#jLNmPbeEtQxbz0nTUB1qKG`g}b?NYpzjjW~Tk zrsR~9L|mGeqb=4~ws=w)=L<@zJV8lw;upq+R z#rer^Z}kgVo60@doHCV(ac|rs*rilDpoIAAX(qm{w9e_z-yCWZkw~t~_bia8srVpX zOY7CxGFQlcA>6R-pPujg;G_6LV?cYuPm49~$^7mW0&z$tR+U}aTc=W;Ijtu3h;m(A zw-c|61D#ZEVgmK*1I(pB1bJuOKiUd9ZXQ!qELRx``-9`7h9X0|)=2M4&(_2b@q z#hY5-m45qcZY8`rf5+rJr3Be%G~3)<5JGVL`w69`KYRzY-(1lBSl1<-MR*=lx;rq@2 z8_r~tQeW?dn){#KLrlhkMn;&=tDqa+VeF>x#(ZryUfT7}-c=dzsCujo{Sy#UjeJs}R_2 z25u-}{ZR~O(4p&gR$Td$h1ls3|9Y^TaZN>xoW=E<(k7Aa8tX0 zPyBrSVWjr^x^;>G{t|P#+H#>Sl-6UC(x+?o`0o80x2_RLhdtthE6lCb-5A#XiE;_s9bn^sPr(84yX%u7c z)~21!UT^jau26c%^mK*)*fM|GEH7~#L0`sbL{wv6o_C*O4bx`(oW0p(H75CAJT`g! 
zL%H8}@&@MDcjv~wTaSMol2ivy5wl76o7?rqj~ei+`EUgCI$)&FRT53{>%R!{)fB`% zcy*rGJ&$5?&emw4G+xOLo&JAE2@>+}=z6@oP8qMGur8j`&NwWbT{3SvLK&4)Z(eXI za>Td<0XYoCB@+UH&ewZIBKK16mJwu>LoV(=B{|+?qsX4ip~@aZat1H*P#F?fhCtDA z2gaVgs}Z1GJGnaagJ{?W6WW@~Q({aPHr(|%G#4Ndx#SA-Ua)EHKMK3Entkrrun$m^z6 z&<_c4eEtlk+NFx#9^AfwBdkcywiVbG@~o=wNvqxMNPc^l_!?mK9z*$w8xHR>%)5f8 z#YGI6dk|$KSWvPC+Ryb<)@^cA2pL)4aq~O4vyCPk4~n(5jks)uhVdouAm0|kRZPK`i7TYEpzc_NNx zUH}KP{7WzP@pz(-4)&2{B&&`>(NWX*Hb&vUR$`hT{#3c8@~wEB?3;fiM^zFr_pekF zE1cN>a{qo8;y;{rtenXLPE7V&8ta+i>9fpin5#?Ql_Y3QAT_ew&Lt(x{DD@ZLe2wc zu}qwk!kPq9wQq-pHLQ@Q;GC)c@i$fkz5Q7thaa958{YAez2sQ6Ns+B^tRA4&Y4kt2 z-ln>n3Kb0C*8dWVb!+u2np?2>OhsF}hv;NS87l!PfQzfRqO!Fva{6eEEkU~71)hrn zRYkd+(>g0$z0Au11xAobUK)xY6>l1Ar++@}RT;3v}If+mV+ zyRI8$A2i*$jT=gN<0Rn2s0qaYOxn~1pC@K z5}+XFlIsbjo8h?w8hYY1&l#_0Vl`+Q4cThbtU z?l3#eDXQgi;C7U&9JNhqoWb~A-C{o>zOKZY#ST=3zx*k z&nMFUJ(1qG$MoN3dCambIEmb&%IywJ*DBfjt^i4G zDccH^aKI)~A^}QXWp|75f5(9UNz>|cN8RJcGiSC0Q->!RU7+>;n84a+WsKf4f)b4v z`r0ybT$cgw=6KOY={&CLQ`?#QYy2u6i*FyZ^Shofta={`s*`tdAsyG2pOD;r8U($w z(5#$We=FN-2b)?Xq?LBYZtN)up@8nsE@|^lQ_(|IuZ^ZbN`lfGf~%?0p|^OLP`Kqa z!}6=K^ks1Z4joPp4G_tk_!ol$3tQGDO6Pl?ff3(E#QM;NMqtC|z9iNMs>JdF*IWl= zbk`vP*f?JdrGa$m=$PgvYPvm8oh@XM>9R${&PWgx#g4W&I)ZFGpz&Qxrtx-)aXl0v zma5UZC=;5UZyx7mJ(>ovdV_m1%NUz&S-fmAoXjnx;14#tX9f@qtUJZ~y>=CF@Fl;h%8g zvsXe|8Zs@2EQ7jVqqA_Cr6~@?c9I%YeqLODNklqOI++eUJ5q^Tv#kLIou=qCSn>er z$#!Y`L^_V}Ig!g?NwbN`^<>jektc?14aJLBJCxoU?bbErLQ$KEhC%Ci^U%;U4hmYZ zG?0Yl)cMH`6|?fXf9HLI!S2&tG)g$nN`<7)xMH^Mfo7*P4Wp@ypa= z9ndE{T)3ER6|b zifCCw@7o$@d_GlYAiDDfX=eD|wxTNRe8#VzJm@U9J<9PiP=|@0M7^WC;Z;Q3@Jg8K zPV63~$(#F#&jeu|sSuplpXcxWX)_wKg0VD-gLmRpN5a4lgAD~~9~;vl;-Qi(eCLLR zGY>hHo=N$U!j`k^#@+TWc`}BY63;=Yq->=(QZtA2~-(((j(UP_-V9$zI2g=fUK+9O zjx-}U7Yea0`OJjg31yZHI-v{RsU?RwnRZWqmn0I2)0VCApkRQZZwQVi#(`Mnao#XU zFDv?N9dAfla`0yf3)AEyOLI9kv+Ehkdp|x@tmSUR0V=N~G$tob;6b4|-^lmAot2VW z&sQcC^}}-}QUdN`&j{XDzHW&*Ib&V5i_~qw{wbNVaK(I%MvD7bQ+~@9N(CKi(n5Cj z#13LvIZ6+WG`-h^)ND#cN(>dYwz$*gaDa*(iZJ2LiOY=Z6xG}9mZeYFmol78v_G3x zE|BF=ngkLeRO#qWRw&h;wnyl^RR58&ThV3w6b#=Zb99w;NIV!)HzAF@NzAuT38C#s zcC`p_ixCg^O$8=pRUD(kvTGXjR;S%TWZp|R&XW!E%pkHE=9#WdztX+kkSW8{39Ifo z6=6w9WbGgC;9vk^q{OSr#nQ_|<9W30%6ZuB;`zTKo%`Hle_MhZew{LDM!36$ACP*b zjn1!PnN9$AK#p~45OMiQ!`M2|rH$st@Xo7qa#K;7z39u2e(&ZnMbF%B+0SY}$?RtJ z_bjmeC@~f?Yn)oJ$F#p3_Gf6(e*aY~+XF8TN6_2{QQlqLexKaFSA^^}O2sTs~uzu1mu zLiUoI$LJQ=ghbG7W;gFs)7)!Tk@~ESicR+EN>$h__K`u-`U}^xE}4@^q0j1Oy2@KI#vfGB{Xw3k z04dsoE6qwvTGw9|Z_LETPJ0%!)SnP=M(pdAyq7ify&ZG;y2~>3@+6ExansS4mK2Q4 zD3?>j-tnPR!bc!Zg95(0KZ;%XV`Q~)2+-p zBHVsYWCCj|Egd>swqg19uPP2_^Q!Rt!2}JUPWuVT5B>Q10wBvY8M=D2nl)T%cqC?b(h%?S-3%ODZo# z-gH5d;pW=S!_Qv*JKnRyqsNn2r-`jpp1 z!d1VFdDp2Z71MuOUh?QXpz7YjxY?815jsaK{pe5&_!l+jhN<-d;w9rHT34!j0h^z# z5>r-*>u+ck+B;c)MzwryaQLuiy|O3#m>{_i)@+IC_HzukM7P~%AhWq=U2AQ|7dPZq z`|{hAoa8ul6_Q2~-r)-&isz#E_tZ_2+`h%jnBwA0)CTy<5q{q-I|dzE&6RVTonF`E za$X5Q@30#{-*JFS2@Sz|+Vkdu30K5}G2@ibjvGQn#e&NN->eg@mRHBi=5z2qWyC2i zyZKmgU!z_>S2S!0)#Fu60}XDhU8$J@O{ zjTI9z+5Z5fFX-q>EC;CsR{&iFDLxkk(%+UkU-^lFfkF$VK2wb)DNt>ny=dgLP%V9T z-|%tahoM4Z!Hy-Hc|M_~XuYTq;jG!Dsv0h&OqAFt$?Y9w!=?A z6s2zD9`1(~-BL*2QQjmgx_!X?HA=4GN&Bef4Nu*6GMW&dZ=RZ`E~I&4nXo-{*kpm+ z0>A{sI*n!3xj)P68+_Ng5I#t9N+}eh=y!j)k-drYJp|mxjfGlXZHH5U%b zAs)Bwqs7^*9Nww4Cr7Cd1u;IFIwuSWai0j}=MMHaZ`^9p#;L7|gmxRfGe8kG1ciw}Vi&HZIvhreHEp zNo<~I=+Z55jq7gntIgL&%s$UuaM@>L3adn*W^qI=m%<#IvrWyP9lSjGV$WXLw7WKa z99;QEPZM*!fi7dmXC<5|PrF*#Ox#d++Bd_g-IKwc1td5O23(Vz)cX z5pI{$4>?$CeeZ~N%b3~rn|pZSyWpPCeDaQ{$mM2)Y009`NSfA|Xq%88QN;DE;H(w8 z?iP<@zi$taXMR=Bn`sKM>@N3;Bzx*H-0sFXK-*M}q7a76<3BDHjT%Dt#p`d@a`w^= zT?;Ll9n|%cC+rMxWE^%WN7L1B&l&NYSZ#OTLs86Mr(%^Y6jTElyCS!~0%))Bhm(i5 
z&^P?NawjoOXo&JBZ#?t({eW)VB=ocTaFQ(**6s%z3|o~lO#t{? zpxEtidoE5hn!i# zQnh|qN5*%%U9heyfZ^ta=aQ0VNiJ$xmD06e2Y>M7YL1e}yyb!6<7%I4`27lNu;|rg zZy7t*d-0fJ?3LJFgW{483cgu$Tc`$nf9N5c&u}>3Ua-9jEKYQNLMg2ty}BhWtF?Z- ziAWrmyi*q%tl%4&y65FWg>1cD$h>mq-AUpFA27{TEXTAjB}x=Kp6053B(7g__=3iV z*S0|T>twfRY4AvoPg!@$?%tu7W>o=={d6Mdc9ngK6>}N9mBe~Ye{v6hrHZSmm{8R4 zY3KlDq@)k%+h|nSiBI_V&KuPKF^m7<+kb^B(ip(mgan}SETV=`sW4)+x;p`iA;(i^0x_Jg&s*5xiN4nXX*Ff9bUm&ir|&@N^v31eARCy_aRyaM63o zqiO2IEGBQj!mIHN5KM-3(q1mjCytKixTY%=Mp-Mnm)0!YO<>Kayl%3?V}~UvHqPdR z%3R1Ejjk{bcg}mdw#k>9^A}(5GHrd0H^{qTP`myO#+OHX*{8H4!5G?&%nLTsYd}5` zK_a%2Ac(C+|p)n>|-eLBjCTv6weD8 zaH*4P(UqR6Ja61zlbb%6DEE>e-fWhx*U*K|HevJ<4r-K*XWT3G5Pxno_r@H7s!`%v ztTl$%Xcw^jzaZJ~0*M>gkFYT(ioT(M!#^Iu03{V!FAt8Ue(>*G?vJq_o$?RQ)j*X> z0;X0^2Xh90xYe=L3ZEQ^p-g15G{0{xL7$ z;g|sPy6%Hqa8n~L&&?dGQK9rzSto>tmvH!Bq7o zH{3`wTqF9JvCi>~XOs);*WXIGkz93M8XTzHjTH_ZV=aFO{ruEdze>GBlC}=*mgAPi z=CsR0vraY6AN|xoM8>@(<2dViP|8A{IalM`6_v+^P3F-ay$TUut~d#C?~oLUJsE|P z;*xbz%`1gW35KA9Z|RM!tn})*#9ZWsg7ZdMIPUi{UGE405s?WJ#I5wnMLt~=#@}RY zM;(HN#mh^ADtEJw7N^h@CEK1&57yg_-ULpQh`+gz;jUsgVZ_WVoQ;%kAips?4?m7O zjDC#zll=Y1X_)_f`aGu$j>KR$SEh){-EVQ|A-1d_82I= zk=9>(VA$jneO0whG4@VuxhCZq+3lNw1qvv;E<2O)(=B^JXAib~s_8BYH{aLrY*&ql zbpT?`q&)wAztSNgtzgAjXd|hOcGowXF46B z2`{Q+_B8eT_HZo)qMA+klZF*#^DQmhJH8HZHH!zGSCUxo?7*=vr4k!$Qs)%kU9q{t zFCT>={r!C@NB<9L=N;Enw)K4ril`JpKtYP2AX1ek9Vr4zQ+fyKy@L>nfQWQK5b3=O zp(O|*fFixu&?7ZKfKWpTf%h=?J~Q4sGd}k|?>~HEIC7GG_Fm<;*4|(9;XVIpMrr{& zs+ZpWJTgZq5$RQNuL#dKzhb>m@}M(cLXXR!b;ri!fWrpO48&`H9?(C%6PYUcr9d?d z-4tvdFdQM8(#?C%VMyT#FnRxUsr#GPu&4;;6_c{Yv}s)2J4YH59j}~n ziPE{U85ZNin<9xF51x6}ZhRqmBIw?w2-IgXITU|fI*$45T5NW+J1rZ@o);16Nn+*k zDe^^LipZ^c3lG1-N%Pmtla5Fud1{FV28A!Zj_1bIG$iDDw7#$3YOj~OFsDvyO-@lt zujpAPo}fu&f(OiAyR@3(@=LKJ?O0jSGKS{ru;f z0XAgvKsAX;3;w}g-89eY+`m7c-4Bv*94&1)lJdb3z>Kg04w7cs8W(Pn%C*094gR8M zjcND+Hq-}y_s6^5JNL(}I|__6JQw_|> z$3bEv2)9RB;aLu~@&tr(np&SLoE@h0=+|zWk?fbcG54CFUt_OZAJKZo%jO9_IYD@g z7b%SOoVnGVp8XZ1uRIO~+bQaBl0!7ym%Dae@%NqsQH@i`-pJ5ZOaH`Y|Ka*_WyZ;h zTJ6%6?6$itHOph@kiHN#!GZLbCM|E9wioj9!!I2Rrc6f~9(u>GDzI0fESKXQHJ_dA zW<_~$4TMVkAxr+U*CUJN2aqy;?<{3Tw)H=+ek}WEvpmkKig|;LkY~@aEXM=FrrZ1% z{m@rn_D3ntzwbP`4WC8Xv^*^K`%!#CkfN~0_R1;q9~1OeNg8t=cr1;gT;wh}3@Y_B zP_8>osE#vLv;dNXU^yqnu}<}JGm5h$yUrFp9`7(*rC8U4ZvZ0M&dz&tiW~9aCno|> zLIYPDv#&KD1I+U;T8i9`v+S4P7Jg?2B8iVLS4{)@uS=5SBmKiIuDveD*QdN393HlMa{I zMZs{vFeB^yohJ9u{C$Z1dxuv9x6-b=u(;=_9=Ci_j>63Yc0+7DZ$K+6`z~!|D08W& zoetOQ#bcogVL4X8q`~EkfYjuHZ@Yx=E}-zjd9=kti9QJ<3!jCFzWT?K1<9F;f{VhI z*;lGVw1+lu6=FH>x=U6ZCN-#h8^CWZ5lwTJ54@pd;QTAM>-ysg@V6ojLofC2$t%#J z0h2bm=hJ)e&g_{gzoDuAnxfb?l)geCaNPS3j`jmV=ggO}j=;T7QW03X|4-ow?&s`u3v!<3f z+Dq7Vn;1=d8Y5cShHok~4;Lze{Rm+0LAqJ&U1k_m7!-J#>7P!r>}ChPx)t^g8@8F^ z@X8w6CGLH2Mz}uR&*Aa42PZY$nr$wIS9vnjD010|R|eE&Fw}@BW4*nF!D?4%=RmQ_ zIup|}tUv`<3oB#(#`oiY-(LQ=T5h~#w>rc|5Rz_ease{2P}LCP!qIIVr@ff3<+L2YCVnImXSw(oHYi@G-4Td`BfsbrC&y! 
zu*$5YE25goWTK;KwV!M8-$%1Q@}QJDhD^9Ff8C?*H`ThD@s;DDr-=2zqV&#?WvBVc zt@7i2Kh9y>P5dKI87g0K(<^vPH0RKrF*#OJ8%}D0&&Wi%lLPBi8k00?QT5hG=kU!h zAIQiGxGvCpfpMO8Qq&A!@B}-%QW%Qb{*kP)oEz^%@xCzF|M#TnmyY3ItXYOQ8Bo~*T_`0FHVoD6{pKa> z)a$M-+8gqsM_g9F*IM9iv^>^alx8lyC$4nYHQE<2hmlpnK|P)T8T z?l3?oq`av@{2#c6zvy;bXU$4(aB5OLG?^wzf?1@~Tu5h66VBl`5U=HhSbsFOX^oPf z1))ry*J~UwC+y|}2B3XuXEH?d9;T1)N5P@4YpC*x``DR7ZJE;aYL5o}M;TT2!+Zgy zH>N_BZ%)<@EbWhrGv^W~jA+bR?erQ+$UMnMHJ_WnQntB7l}daD@d}%2>#a=Q*Fn7H z6+I5&T9#u7ujv&N^}}wz3;01?wUaioNZlF5pju2FFI?g`SjbzndGpnQpJI@|wITn0WdXx)8nbCZf8>j6UF<0Nl1CkI zrlRx*rf+p4WpVl!ZR<{ZD6ftAJqX?sd~)Gy;!upf(&2}u59Av7BF&B-?p!6Z8md;-cs4l%N{LXMEdP2d_ zS$+7lw?Lx$O%e-;jcrim6$^u0=WaI!dU>|UU%s;GbI+eQINU?+3t#fLuP=!LvZqDrmgj*$+42Qw zf&hEj*`HCS8ZMw^A%|^ejJknwjweF6UcvL(FFODV(B&qWpjMJjnoz9Bl?L0UNv>-P z0D460YIr?5a3h&{Qmb{=m&<0Za+)ne7~fn=T)BpLAT~0UCGqdi^;ZeNzfKM-G6EWX z8Qeu@Egm8=9~d(~xd^O@Qmdzr6kX$TIdV5vfZ|x@F4|o?%o4 z5da5FQbX+1NRr>Xbqm3 zQtK)>OYS5@=eAiu!iO#JsPgqcYd@vOXgLN*!{+999vdUiV1)7r{^nEv9zgx#h0{FH zx#~YZI@iIdYJP50oZ8(wl_~nZ;FI(J_eXX5Py-4sJ<2W$e)`84{^CRY`d5AOr_HtU z4t#lkKllIB+mO)#o$}dpZ*+hD4-iGhTO@zupHNvTODI-8DY5HnMS`5s>cI-M9aTC)THWdP<5ahGX%kNxJ)~aS!RQ z0$@_(c}F3()V{|DsNGir|H*L~KMQ%MuO)q%0nFU_mRCQ!LmKe_0MxO$lj5e_V z`WJW5;D(MEm_C>H##ci030lK|5V$}wS)9C!eWiEV*+88F_r>Gb2^kVTN-gWy z7n(Pu#qqzqkqfof@n;E+0XEgi+$l3@x_Al-;6PQ42)nPy_>`skY_*Rw<@|jAzgJ`vE*vregdf8;RfY3L5qRF%Y(`(9+E8`>nHAR;m5>h3t>)E!2HIMUQW+_%GLx zP0Gn$iI>c9xTgAj_lb$GdG_g{zGjr~*4deZU4EN00ku+%J;+{n^Qaqye=C6xQJy1~ zD^b^a_wv>sbLGsr2e+MogW1U#c>X#23P=JV5{kJx+zrw*s`pkFIognXz5;Z<=;5K= zA&M*qt<9c6oO*{oxmWq12Q> zS?NZGt1+z$5qU-CbpDc6o>~oX^ZW;H$|;Xxitd+bah(jRNU$l$e9=;1{6#YM`&w-z zV619&{r#W73sux5f^%ob0RoqBset|XdvZ1jLBRyz%t6-96lGTTrM|~`r1YZluBe{e zz`%1Wqvg!bX2<*cuOOs%zs4W|Fo(Iog9kAWd3i$xy);4gb+8kaNqJox&RZOi(7DoWJ+3uJhUW~TX$#%Zqt zC%7jwmGJWLljdW_H`x2c=g2K{Dr&W79GHWEp$(V(GL>F_bQAA(`}f z<#mSLuBX`!_Ai$A{SxDQ{AUi&@1RzEt6t$B_sfywvU*utRWI_Dxl#z5ztb-GQSd7B zTWsXm4$^3()0X-pff?>NHK{M&R9LO`WBZ6;;YaupZI~?f33{lIVza--)l%oXwZOt2 zeLmzI=x(oglvDH)R*~T1PYdAZXYntuGsV(=cBTQsa<64o0Xo2o)?EPHFt9PxWPS`B z`<@9<1hhFPzBn@jClyEKsic=4KI*S9f9AbAthHQ_9W`)pdWyY*l{^M2FZo?W{h4lakSPg(nC7}qSgIjY(vt+|_GW-Fc#tuM) zx$m$m`ttjwMCGSXV_$mbzP^ALoN}Y+PUMwyF0?GgO}h5>Ct9Wz9=`uae84y>j~%8Q zvpsNTa4uwn%0L-T`d?_6_0FQM!Fda+$yA#pC2wy48spA9YiHbN(d~1@R9%&L#}*6k zmy?V(F;uMEEfV7C<+28G8FA*Qgn_sVBJ&T7ICS@M!M;pS3jO!;cX!g+&~bTNU2Fb& z6?*if`jcAg_cMxBI}_Q|@(LQ3$7D2#-1FZW_1u7Jn<>5U-s9U;a#%Ccm!yPWAxIIk%Gj6y#wU|+1 z+qL;q{%n%4dr<@vc;6OX=X2m94DcC;mvAj;7xiOwDJ*KeZd3P$-eWx z(wDm1`ylmGcPg%9X)x%;L{)@N`d8IYj|Lu1yf5uuO0*>mr>l#X(;go}KOe_{Shp zaIn(|9BZlsR9WV~mK2y;JfypSFZaN9k3ufV2qugC7cd)RnS%?t-*9{u0ji_LMG zN)$`#*WmRZ-bpr?JD&CBCO87piN4+|p5zkYC$*m&LW#mRDPzQpPC7mat4%D1R#|{Z z;H(7ub|-07aSsRQLYXHSh$_j)wr6EzWf;~zRlW2ELSg&(DyZnV_;b=fx+DJ9A!N*u zorX-4&$G*Q)7=|O0I}sx^@P$Xx==ag7=VBd0_NYO{cUzXd&~2M8Zo)g zS`oqf2tyF3F96w`-+Xo56ViL@af56)6(0y|S93;kdFi8`LySVy3-Bl*gHJ{>q1+|| zkMZk>S0?~_p#K#5r4eQ053n@Uo<0p5xNdoB#;vLdr~p3_ES>qOI~_MG9t{JpiqVBf zdD3ybfZZHpJ1-~eW4Wgv#U}zq?W=eI?<-7ytG}MfH0Jo!Mtt%c$WeIF2%vv*1%L}l zIQ74%b|daMFD6YByVGt@k%(+{Vlk zu7z)eT)im;fvDRiVSvjZi(g+LQ?dbS8^5t@uAdAwVxBin-O2AvC3{f+UrZiRy!k?T)pCpke0JEyKT0eEL z&$w#d05oQ1p*i!;j~b7*0mw@-;E0EXmzS5n*8^Ckf_jZ&&3+(g8>*Wk3c1a<3RGN{ zl59&eS(K96o>kin1Ez8Xy#Fa|GX${=;30+`2nQg^`SXj-(a=S`Ly{(Ykg}?XFov^){xGbA=i+dOG5?Dy3jtsz3jpEVE&6 z+xzAA`GkS^wj=Q zTel%u-YKUi0TO&?x!yaBGlN8C`5{S05YG=o7sl@S04?V6*4d+a@vc?gR8zDQm7~XF zmg%{$_1b3wbHbAe56;zVr2j{ydOp!}@2rp;>ueT3k^r+Y{@p6`AXa^;hL(hJF!d7A9P^pY}7%N!?@0F;`SqWUCX3cKaT< z?MO^VpC$))QrQSNOelw{{}Nu7&v%TzXNAM*6?I_X?3q*LSDzw~xEW@i<}*$Kl} zQD%I?v9YmB0GuliKr(+A 
z7#J85{vVU~9&Vm&uNEzf%qOu{lH0Z73hv@~Z3)BGP4j)$8~avTb;nV$6)bZLwY2L) zyzal;V(0x-X4vxaAy60?tYIEkLut0>p*3^u)ow3}1C3{FOO_ABlg}O(fw&D?`>cMklchiq$JkZfujHg@&fumhVpc>t|v7x zdJzAR-{25V=kVZqg!;shtDx8`8X}>!mfg?*Q^iim$;4`Kd~r01Jo ziT$_%OO@jk-($2u?gXF8dlA-qyGoRHPB*g0IM;NMd`KCW{Rt}0|@>#dugoo~0={hZ3KTn?C>oqZ?G zcK`m1PEq>kq|y=@3kw$f{#^79z%L&>%~RR6R+#xwDAXVT8I~Q)S4nWMy0&j?8}WtY z^iD%}&ozg**wZR=97b)|-m6p)jo%Mid6b-gn4{wU{gE^Bg6~pj z9;lrfG`BCUvuNOB^Y&>M!OO$Dw4?)od91lIoX5=`?gmh&2c61C8L3(j`+M={6+#bt z0#AsvNV6*G6YsZ{Zj)=*s;%IBKCTWQHK7_}~%pYTFvP4~`=YABmq^B`{yrUZi+ za7840*3C#!jh!}Kxr9wwj_%j07>Ru9@!AngM9E$#zdqi;LPhEncaRm!STg-Fn@jtL zEG?_o>l%?YMBtqBjMra@=~@%FT(}q(6DNe@TOWdcV)U9;f0-WOM7ZTijT_Hw{^!ieFbc$EAuz>DWU z4P5}TCX+Xi6)l1N@x`TJh%I8^6#Xo2_RqiiT6Qg;m|rfM;dWLpJJi=dAa*FVcpP{d zlW*6m@0w?M=Qc_5DBfC5i(9EIdFfP?j5S@W2#Y+ zo+wi)WJ3}AO&I{`>xLz;zrsUJQ_7Zw)U zh*2HdQlD{>@r`ooC6v>G1LSI}zKq8U3tahvi!!w}rUB}|y4 zPgkR6?K8Y(hw%6p*bz=s>)ykc1rKdtt>#oHPkVb`m4^d!=7K)m#`iu)Nz=~w9W2_- z<(i3$G|1S)wk5AYtSzEerXOG0D&b&;E50&R)n|FDwK8NDHAxsgv$qhZ%6KCS9uj8# zwf6kO*dw6+kbBZSs;FYb`w0l*<2q$UxhY}PqDM&CiWJNVFM{|sGh+S~cw8LZ2hJ-Y z_mW*$F!UVGTG893j*QM8Wsh0PS4@-wvgQxwl>j9thA9H&-BN&ERf|ABdAOjfZ}^-5 zFTZwLaQN%^{HON|P6%uatc*@#OfLTXWQjx1gjcO-C!e|hHU`iO!nX-u2D-sm!H{h{)wE&4Ko&zA+g#(;PQGN-N zZv?eRTb;u`E)|CJh!Q z@rs5P&hDuNYnW=bJzu5V6uGioBD3T3fNJCvv}&kT2!01tUuNen$GpMnJ`hK!jl0tP zCz?&)t5ebY)jGe$<)6$JSwLeAD(#tqfZ>izQH4DpO}`W%&7;>0U({9?c1L9~ufHtxQ z(3I+`s*->)ODn~nRC+ROXlU*+wEEw)QSZRpTMHfxz#@nE+ z&<%K7J-rfL&ywsB15Su$`1$M41unIbuz%)}(k2=^>czZq-JVPaNekRe-}X01b5GkA zMfq~$^>Ds^y+Wvzx$5qVr+B987lcWcxkWyf7Q`2#N2|6jTyzViARe?m0~)EgN?XOv zA#B4wiB3dCDsWP?(IdccqRQ&4LS+}fFbAqvl6pzKJ1@6T04}QcxH-hhWz~xh*)~U~ z*@5)WFZ1`?ct;&R2@F?RwxPe(R?$Ls16X18 z(zR)ciPqIkO-*;p!tem=k^7Ly4uElLwb^_Q@Ev;s+>WMFGJ(j{Y_l`-q_SGr6;ZAt z!+wv0#CvS=C@TY}GOtYBZ2Rf>fD|$2gkM9hIkHdKy1Ut1UVX;p3QAB}F|;tUU%kXD zm`{m?)Yfq^G+4XY>$5CZ&ewCID{n zc|vj5Z;wvYdHcrAzUGsXa{+UnXjSqmWF5N93Wz(ggwWMVsrHAYcdgKq_3MFHZ!zK) zko1ObjrfL=%(%+l1x+vlUusPzq24yF(d6kXz3bYl!}R)Z=rO-BPxuT2%34JDE#Cfq zQJ?AsKC5;6X?sj=(T00^TjDM#eGD_&sRU> zdG?d`Q!FZA`P^XIryo;EH$%xrZrN{qpGL6nf%|H$9hWg2C282#X*0ZgII922Flee% zVEsdHLIT0pkll9)QJej&J0EeL4yND+3-no9Mu|*8wVq}MAJZEBtv+h#Wqf~jrpS87 z{gOuv!;>fJ$vYe+F8-~RSIl|?^hMg35z*v$`Y41jguT!M{P-}M8gpqJzc!ZaixXi$ zVt!vxlPdXq#fUpHra>mp=Fw^!*fbz@?u&FY=xT*QoAl# zId44OC^eEz=XVLyWnn3 z;s<8!B#8$~3Vp|`&kHGq8w5mlm(|Iu5Fuyi)9atE*EvZhcETUne(?F?lspC2z|85% z#V@N(h_H~Vd<;fo!^9Ogo5QXOxjThCOc*NK;}yNVKlDA;sSf?>4p@HO&m_i5!`r^0 z!Q)ND`iCWw1Xv1B2K?y2sMtQ;Qe2xkpLqGl9&Ygd0e6qKV|44MA{?{#-8d0L`&&b0 zbr*k}XP-dVH?C|4Ok}RN2Cu?fgCbF4J;J}TK(8g|s2Q!0`wgZR?|xqts%OuZ1Oje- z@AA$LChO}Sz`5VYe4f`-AKDD^0oaJt_`cuRJyKV!77jl6`IPwV?i)#Y7Ja`)S6o^= zhF;rMh=e=_5eJ{l#^<`sdHLoTBvWN+bn%;kZ(|R~d+0vc1YsGvxQRZaebXM^TYiIF ze{)trz^>kS&F}!FwfsU=+fHhJk@FIrUZ*$({mI@@$wNHa4V0ek&6@W9J-pI;shgY& zzuw@l>q!g6OxEY#YU@dWOptV>PwTPiIvY;#t9dK9sV@iJ5C-FnbePzySF2k=$;jAs z-0bMYg;nKV27BEn9jYq1xcN6Xg#7JT5BBOxur#Fb8hwWo2Y+AH+40`GJ&1sBp=an0Nf8YS#eUslNb%avtdR#MalcFW3oF+S5tVp$B1$yP-vA(xdw zxYT#<#8se)xNB^z91&~q=yw|yTWAK74}jZZrS+gE_a$q@@w`SCKi&p$)2<4jeG0$s zQm;Q!SU>D!qIL849eL8PQs%Ly1E25U^**Faf_X#1Y<1q^_uYAD=ad$J{VQu!Utr4B2A`&B%l1lP$^C+Nttox%X+9%9 ze*9>Gn1W7utwtw1){h~Ou=HPnGbPUi^4|fy=+vx$0<>0ogH7<(n11W&qod zD}{#DTp0(o4vNpTw(t#J8q-gI=X9d%;IGl+PCb~hKR?r6Uxc%alJ^KHg75Sx+=G$j zV8^{3qxB`89t)eoOGek>nV$+S)tJ51I!ONjdupfXiphMB*z&k6;^ErOe53^Ty&)90_r$5d0jToN8DF#+L$mG9UlKIH8sm}}xg*BQLkiSB&xca!S3*%pv@yKl9? 
zTFJDii~RTLmmzwZuZ}hO;;@|H3Fl~8Sy{U3-q6*Jaa(YEkw)<<=aN47%WN2hmx%r; zSKeyt9032UxTv6T9N%YqZm|)Xy47uq>1fe0(y;ono*$WznP7Cs{)LEUfxm&1|AX7ACoS*Ah5nFAivC#B6`IA7sm(13cF12=^mkuzP+S1?(OC#hxdNbeEwqW zz4wn#5Qr~Nz9e+L9qM>-X|mKjIPiquog21?>d1?bc4?fHerXryhVC2p9v<4~7m9kj z2Jw={Qr=8|?JFC%6m)8wRMQD;w$szmgQ7Lme>pHKK>)7paD&r{^nbW#l44|5_W^{P zWH;w;5x^^g6{dSR0XRWLKVd=9j zLW(FyF|Ql~;3z%rCEKF!p0=ucz1;#NXQ^wEWhOyOtrPmq-X(_MZhH`}Eb2G=Dd|g9 z5bp0++PjUK{!74F!LxvOwRJZTQh&A>E4q^GL!|X?MfjmzNbm-*ysB#jZQuKJJHIUFc|pCP~BxWMsTV#wBf zg1_F1ryi;P1NE%zZ+L9^FDZQgh@^0RjwMkzkF&x4^xH$6v;n|A6ZOmqbdw&hTa4Cr|tp zRrU9)|9lh4ZX%hXN!5rql5^y39UZY%ReWRhzA!Tji>J!UQK`m?iV=72+>r+YrJdC= zH9$$EL3~dn{^e}=>u;c>4}4IHle0YdigQ4TIpJU%)d?_&JL&mA6_u51tYTsYi#@6A zzK1(8K-our0%{+GwPgK2+~^V?N5DmfDkRJke20bQq6WfreDVbX;#hF(oib8&L9+0alh^L`WP%zRSqA4NB0n ziwQ$ZvA!d_XBv(sUL-;|qZh?3FXxA0Dj(b4mEA`Y zvT7P;6zmt|V0y5rqYuOnzmP`F&r^21r3*1LZ85P%usrq7{hnlg1$kqkWS;VR`w>6L zd}7+JKZMu&PW8vM_VU=I8*0kF&C{bO4-Wp8#*_6vTVD2-T{{ z$m@CmAW&EjJcOS2=erqj+NlbN54Lvo!kWO`uCkwiK55afs-G7u&YW`;ShaO}j0*(Z zONCZMG?haFi0h_Z@wV25e9`bGxyyI9jnm$Qgj^t2>pk$OhR;WJ-5RSvml4}*Ezr}< z3AI)hhYFp{b#`yf#2ry+(KmFgS1E!{8Go` zgcdz~%0;E=y*zWx5(RG%KwahXJAh2sfAYQm4==kv@|$0*47tQts9Tbejydefki>+W8Fv zy8Dfwjg@3wmDzM;fw5a@VIOcz>?cVu4KDFq+j!uprQQV{pIWzkVlCT+lbfVUT)fSg z^brFrFm3%(Cq_FK&4u}9=zwwHiqxzCK%;yvT`gJ`;KQo;4C0JjEqI+E};SY~gi?v^8t$k$$+Hoa(X&Y*CAl_-_qB zfMh^fBOS5%ghmDeH*8#2O%A7b4FCzIFAwI2`<>f^ZM$E!r-71=w9?N!`?{1}*=Xi#ks9;thaqx8xiKueuv0U^y3}uHeu`#1oFw z?xUqJiD|q`IA9ybyI~E2ebf~QI+O0CcJq9X51Z%5C*~Zxr_BbrQ77f%>W8FY|S|i`H)ve*}GW@GgaKB$I(zhy|R`A9x98E|g&@5%3Vo-dAv`R*0mo=@&Yv!kmS;#%{;>2x!UkDGQ&s;%D}Ah-^{OH{vA!+tRr&JXv5f-1se~_D z8|HY3Sl*hA*d?c!tXo`%KvzKJu4n>P+X9THX6&FuJqd*-vJ@+Z@1d&?U_w*9nxMJiJdd` zf$?^qXPSN>!K<+Xv8FtFJQdiWH0z0oAXA$(R9sK{LqYq`f}u-quHRECd%ODLgyahu zd^%B(2b>AQK(VgA@Kz~}5>nCXY_ zQzMo0i*L?tpoZ57$FjC^n!nK>8wYH2H}8PK;N@kz~ik5JRssCq5q=*ON>2P+E)7-h|< zpZvDO=)f#Ue{U2KtEH-&D)C9y@cmPa@JGL$+EVK?MtjzeCLln9w<-?NjioxO-ayJu z*fS7AVDI5+C;5At9j+%jw$U}4nvPLUnU*dm%@7MqfWJunkLatnKRyq`!=XUEsujy39T_T zZ#O&6ge03SN^3w28Fv7rYw}bTW86U36vTny zM81cs1XBK0O#O@ZF5T_>aeo~4qfYsrcC#1p5GWbV@VUabY@6Q>QxeATIoi`_gcd5P ztZcmx&9i)wY)Uwdmb+5jx$vPjIc&2q_ByqFQ!{COIZ)Vy*x+lj)pBLczKAIWq4(fb z->JzOw!fWyIHGQXAgg&>?!Pw@mN9eGXgl&9s`&=NQ>EGCzqB%w6<|k8X0=$Rj>_kUCKvch+FMqk|JCi@eB_ z=P1(Pa8pNa^}&>oxl9FSl(teJ5p55d&`OzZqPHD<_@aL7i;5p^@iJ{h1LB4RAV!Do zsL$^as|Lhtbp=r{bRjK3ex5#-IJ%-iKv{ss9)~IEKy{Cn#6#;>4tSMhJyXM>45`C< zETouwKrq! zPnZy|2y)2OcehaKU%}!?7RHdxw!DJlYL1VpqcLkFwWg^0frd^x#bJ;yEbtf$S3+Lg znA$X$FKQPfngE$-%l*=?{*lZ4PfqmkTqKtjczET!UO^cgOMG<^4fWg6E-OFRYC#)t z$mn!Ds_c?YL=O}AqL(u1W#WIZY(zE22-AFri6^%u<$mzkzND^nZ5=~=T@1lbJYNk6 zFMII1?`uU6^K!Pa()d+~0bgb~@fg~>lj<5a;4E>0ZSH}l1+0#Wenb(-F7Fn3wFeRC zQW{mWkGu8ItfU<%y`A*xKSO)T6J-l0??L<)=CSgFWMG6~ z9Wq`QRWnI2DTh^d3SqD7;@3Dl0YsWL|MB-EF0Gz~`++n0SD(fP7)*4SHvom|3IF8A zhPOSC!mkiO41_^B?Rb#ueEy0z`ljDbVg!-Jntq6t^}^5xFAPKk{eEo88^(wp^;CQ8 zH3Q}oh?NLDh-#*))o8uu($rSZGq`GDVsaO|uT1A5n;lDUaY@&ql1->=iH|2)VGn}dxfkw$a@I8GM>No$Z8L$(KxnE zyN@vLmCdyhR`rFnfkM6k}9gtp=LL-g#ZzxTx1m?*UgcH9rjw{cw+s`V1L3?r{jO)W++q z?61?%@{$0a)Wr|^Fkk~nkhTV@HeDEDk}vfH_I8Ie8m?mP4@)A}^n|(j5gER+dVVI( zZKXv6q8Txr*|7O2fO~JV~UbR8j zBmQDZ56Bxp&!qN9N=lH^L0*gY=R4Q*Xs6TQI<=xYovL;;s!ld%rj(bLFMwpgqYSi4K&_hQT*FKiRYjmoB5oLT zgrE^(y|AHL5-p@T@fZZh4mqdVyT2+aJCQiqt0CUEUD%%yo6Gf2fM%H-jXMBl0tgtQ z^0M&C2R>eJxvjvYtf#U+5aQz;q?@)&#ZLN#t5>Rr-ov58de3?g?!?g&W`Wb0<6r#N z=4Tipt3sv@bW@SIjetFU;#gL~gV03LhSkvo)1YHO0Io7QlFkdnJ*MaYKOJ1zOTc5p zT~zQ}F_pfpySD=n77kStgI%=ec8D*;Tle~YQ8rB1aH$vH$;29rSH~(+#2M9Fr`PQ` zCmmUS6~IlMD#6J8lJ31FdzI;jE!^+YZ_wT)HRRWK@UlsRByO*91!4_(M*hp>yip~? 
zZ}0{aauRsn@Wj|i;#|_`=f^q5{H&u*ukzqG9cq@MT(iCF8Q;zYFmjsl6^Gn#@_FBQ zB@hB)^N>;IDv&pcf5m66?jofkFs8Euc^+~sp1^)jY!aO4VGhJDofITLrIkKdkM0L z(vV7O3zh*dcicTld=fS=n%y4@$t`n6dk)R221Jv+fu@%p9KMGVINv#pICWoEqFB0= zkecN3{d?OMg#*RlYI_BiB;cjl-~h88tX$Go^7sK+ z?R@0Jx184OAKSbqL=PYDm1?By^xz8f; z@h27vH0cf|)Eee#4aZz>!1;tla49eB*!VGV9{JUd zz6F=d2Ez3n`tCuj)x_x5jwt12B!w((Gb{f}mJA)@##f0>A^@luHlRJkRPDn3X2VU@ zDz$KC@*{lLcZ<+rCXY~EtxeO6J&f(CChf3a9?Lk($YFvkC-!)8)#1vW-47_JapoHpmrpJ^X}c^xF&;}Or@Y!!u|1yW-!WmWbc#Rt z+e#+I?#ETyY?j%D2b3wZvg?W`>KJ|7=Qs%`Vs4l!uHi&ypdtoH8yw)?TC-T{Qk6yi zXo+HITr6vF;Fxd-&@{L%-o4uezeMY@_o@G?ZWc(*GJxeMMw zpJvWor3c%))f^ak5hLz#WE9X7ChR!ZK6;dGM3ejl2*UwySj^3B%_d4tDgH=_fh_cv zoYWzH9NvbK$c45iQojwK`=O49@&u8M20+qe^Lou+XdT%yGgi?4a{sRoU5obU0dJ$n z^NAFPl0XmcN%j~_U#t<*U160B11A!*n5{*!Pz4$F{(nS3fVe}i3JRQNsp>h$2O3lK z=>r&EyevXDTwcNB0;qcSB>U}cAq)SZ1z`7WySAmP+_?* z#U-zsh*r~u`wy$#F^M^)yi&224B6s|P5$VYhdCFDWY)1o-%6*l!r82mseuQ@sPawQ z6`tkx$3w6y=@$7$@B%NfA9o(v>or_nR2pIe^LjMa?e+Omr0t$xl<`It?3^8N4C$W0 zpSyRYJiF!^JCX5t*Ewe0R@qWYu&p-tcK{Ep(%W;A(3`h(oA&hQz1ualZaLY$kD0Dt zniF{9?P|Tv7s08oX{Gk9BW6&!?;m;KNbPE9OAod%G1a@V=+JzT;Ra4C zE)+RF^?Y7>s~}F^K{)<+4c^|Tc+3-4BmoE#*q7J^r?O;QR7M8iQX}9q z*w)}PGeF<5V)#QvXbd>K;8l~~;4xO)5!w(ndr*p&0pBtRDj^nr9X{A~rU60e!-*Z*6`)un7n$tKR zJ`UA^>))r3abGNSnzb#1E);%P_j5NUcrL~ru%ZpGuePoSrYMIw&zcx9{}rkYqM1+Y zledEe18-)az_De*Sd9S`PirXTyoMH<=sir-qHGhI?V} z>EQ5L&@M)1XjP&2;s+sPvyeRXXjC8cNZ|aSf0Hb-yyecvLDjLiqOpYN zJ^i&4#Zh*i?F{iT8CyzEnaS%Gl(oEfzN(W==y>z-EqRrif$fE3k7a{zu#H?LCsV-@ zPSvS?>XwKW@#9zb@rg*P4>z%WPmJ7!H^&Nw-snRVm51Z2UUEZUAy72hvQt36uAIU1 z`t}DRpBfwN1DJWGv&&27@Rc(A;OF%&O{f^*lsRgoOIUgW@3(oobraG%YLI!!+m85A zgd_KAh51ZT&W7>9mc^Mkk;JY*ralj03}RN;cOrVd{*n&-yPPH2I*oK7TYP_-=(~a5 z-fzeq;jHCbVZMnHI0E84tX&CS6qcmrjUh@66z8J_+FO}RlTB5tcRoH>3pA-xOc_7x z+iWve6`MPNl?`zlS3EH($RiX>I_tu=Y7shc#^A#}$g9 zW}#2U2Fu3GgSX+(Za6JX*5~JH@XeH#N_C|0?HR@7XFBB< z4^Tl8!1FPkfC>Jk;PVN9{$lxc(6jo}#Q-#q_SYPH(jz!7*D7lVfhoW_m>MTZ|EF{&Yw=bdV zYq9{n+RhkDx;tNl_jbqemX%J<(LCm48rY#dyIi^fVek#h&d9If0u%~PG~y3?%b9y~ zyFb%--h{r3w@?1KyKg><;R`ZKTMD;J&ckWk&~7W0W0^q~2t4hT`YQ}$BX@o+ncF>{ zg0lqnVwf;7s)a6*AUIafTEB)CXIOMW;|$Ya(F5WmP0tNx+>C4Xm` zhC{>zoRz`u2I2Y{kdOP67s3B_uc^oThT}8DLTt~y%#*c9B_|f(xXyvY81*^>QoTH` zKKGzckP=Y>Y#@@-O}mR||HPKVoZ|a`>l`i)o!@EOlcC-?P!#;LDx&NFVI0t0ZRDtP z!fzZ)W%Y9rI@kQk&>t==A^es@qc+nm&G@_~yKD>2w{Xr)O9?JG2FhGu*$nv^v3aS} zQ`<$-30)ZuL|OGWtH&$lryqq0j(?VSLZ^`Qq9Mjr4Bo96KZ2o}G<}N{xC<1ESk?`; zW8*Pb==m7jYMuy{EZSvc$X%8q!~(pCM{6EOD81oS7!e6WcZZ6$_VTS0^v=QJ0@h-t zRMk!<6L--B0X;zli5ycEyTZ?rhyPEE)|nUuK~6baci~Njy?9uQQueEzc@@LD zXaLb_-x&W~E8U`oqY+S~fAeeyZK^8u$zkqvveNr*0spwAJ<5PiJe=~I4zlH^;ZVoy zXxZ*eqS>zm-q@(#LGEzJ=!MPmM)7AN8D`?D7aaY)>7^dTXwn}5SRE@kqDG}cfa&gF z*y5AbFI`M#hps5epKFrhfQ;#Y?qHS6%?1HCi{bZ;P|rv~+7;|MU)E>$zli91=K5SO z3!}q$^;HR=@{J-X?Qr>0gzn&Tv)c#JoAxzUPyF^;04>e_$Q;p?$_$w?X0pnU`a?ho|Iw$D-E7xAu{8m&Wdfro zEtPmO@hQ)EK^+tRM0pkCbtL(%y6VYVFBR$+8Zyoml`5|`JeN*_>5xY&bc^Efm&f|M zSK76GgF@z7o3aKl^yxZ};Iw*fUG{USS4(YV z4@95k2dOQkanR{RaGE8>cxxZ#_K5Qm;tGe8aL;T~e?LX(Hm~Dwi35-rh3NT2+zP&e zP@Qe-shq4IY>KDh@GqV>ic_qH?egbGhiLpQv6fQVb{7KY&-+#-f4>+0AzVs&=E;VK zi7EFyms+bhw+t`h&L0cBo8ym&n9bOa?%H1tX)33y6bF)r{Rf@4JitDX+%swFt)H!}2p?h}7%w|jOwJb178Ow-5Ppm+D1 zvD3WAJ%cdf{$l=W_m zrO5(h3#W&f1q!%TAF>(E|D2eurhpO>|7_y@Z+t6kixa&iaSnOEOx%HAUPrM@|cHE z4!75T*78m+=fNlpB+8w4f{K`Q#5lm^n%?EWcD zt?NETG-iH6{S)pxHg0s+zh3!&{7aexDADFknN?c|3GWuY5JrFMPVG;2pSdOH{`pe< zxvIm-yJ;oAvsHgS@ZbI8Z8#`tLUNw-z_Y3gWt;8^>Ku)d+4pxb++U~GO;<>Eq&*|( zr$1)p|2Y1CDYABkfj2oVvpakZXODjjZTJZ&9Lt2fkXuW#`5%`i0Q83tc2a%~A?rUL z*FfecP&hsb?a5gCrBqPoO!RgWUh^Eeodg!>HFGs&$J#Fyuw2wFkJs`t{}EVu{R#@l 
zTk1`*C;sJ?{oT#l0OaJ2dnxgEhW9UTJZ;M+KzD%8w^^?JQYM(AHcfS*@3*}Fd5rRX zzyOcYe$n5&=1bKD)Ee{N>9vHSzuQgtv7dnf-gC|}TGKKU{7(2Zz_~ODouYqTf8RBL z&U?Z-a6(6E7;~A({tfWx|1$r7_m_l2Y3Zn#*Zyl~Qs{dZ>+H%QdO<#+J}Zy@XXxR% z+s{{e+V_o>$$Td$X5b;3^Z%%r|7@rKkBa&G)$o5q#r!{X?eF~m{|&k}bD;vc(736* z^zFi9i_BybZ}VE)@zlT~#1p!wV+^9?WEJ|g3#IF|Rol{QYG;pCle4@7)BMLZ5~^>h zN_*Y-Vb^PIZA4GKMYBgCnaNQ_AU_#eS!i54LH&&A0bkZm)e*-4#h0)9!`TlV0@soa z6y25s3(plFF1k?tOCR_o&ND2Kn7x{BL}+9+`4*tW=7W}z?j!j{ugYDKG5hvE!TWvM z@_OW3t5v)uQG=-``~Aqv@4i4wYeTM%8yv_d???p+?r*8LgQh>f2zbBJz+1kWw7uYb z9HeXaxyMwIJY28nHClQUG%QSAtWjmG zZQuVAEFtJvwSJ9#f-1jkA2IE+>k^w=F_C0^oXbrno*&t9-b|>O|Jbuq!H_JqyDbn#><<(TRl)Mnv zkAZQ^?A;A0H%LSXtiYzQtAC>tC=rHS>8hcD%%A(mw<)BY5A18La<5$?w@N|+KE%no zPt|xyVq+O2fOja=|ksxez-DmA4URg`TJJL zeHHIY0O9g8YO)HeK8V0HX&%;Bl0}~4>(nA$37?8wi9FDvuful9Fsf=Nc~D?#X3d2K z5`XU{{|v%%ZPIdEw;HA4>IAS+XH$uBs%TG>pcj@|ss(QktyEq$M6^~OC`K({&kgt7 zUPt%L5Bscq^`Y^MfHP{`LP>VU49_T|9@2eB^7_3^D9q{P%qLtmfG+IHj{Zz=64hJo zt)aCS5kOlLceZLZGa{?pTz1a(Bca;Eg7vwt%w!)Mq_i@r=5kN79_Mi>zNVXl+5B4l zwvfvKR`&)X*AUD2oZW2gWytQ#-|+|vx?1{384p1R2|5W8u@H3&O#>~|m`t8;bbqCA zdIGn-VW4XDWW)1X#S6WcZOD$Cjf|rgh4bK`V8W&O%v_YX|8N;#C(jAy)=yJGT7%S& zMiQ+^QTUJ}s!kQ!W1w?<=+y(uOqY^#!ejzNbGyu*yq=QrMikX9bB^MeKtT;?PxgR} zz16J5J6Y%Ehbua)NKkWNj}ulTy`R9hCi-p#j2H?Z;xRBq4PiqwKRvN${jS-qNH z5^#cEay0id1ar!(liO}&tG-h?_p6bNk6YEoCVJIm5f)^=G;@jZfTc@%AkTUw)}F(a zj2Mv%QScfm@xYwaKK*wQ=$|Gjx=qV%LvCq0D)>p6Tg!T!~y0>TX&AG8g z8Y_}>mD5eEmDvP^$FuR{Pi)NKH@x4}a$cH)zCo2`RTJHbrfO+PJuR&jl z-e!)ao3`?AzAGUCKEb3Zq4cNi@|CXJf4B$OCW2k2pTeX0m<3E&Kmy($b>~AzegO)( zJ_K^U6HUrhWolUHhWOl3b7^{WsosQbmp>ABZ-92t%W+NKGfLn+#l9@R4FikM0JX;( zKnd0bKyCu-m$8iUy}_MiuEPvJd%LhA=0Fa1@S9hQluaNYtz+=+{iP3&vmK`ob*!g> z{#^6PNX93&H=(*nh%1d*xtPHGviz_EvTt7WruQf1{`jh#&HO1o`Usa-*f3Q~o%?ux zshXn^)BVWGC@vc6Tn%#gk9B3Nao8uk$Kra)k-dDblVX-U9cD%PD0QCMqi-UpNY|>L zJO9zI&9M%KnkJS}7g3|$!BH2v2N3lIVRksV7~zSdNAr4leMnQ4yVFMEq*!><=g2rs z(cvas`UFOpYq3)^P5nj$As!?wy&>BE2(9|wulBYR_WSGb_{u9&<4djYDTe(0ZUkta z8{{QlvFyRFCG)M2UJv|s2#nf48MROOaD2hgxnV-W_JhXKC2QHF-8O{hiVaJDWftfB zgUwN484;HEqFp5C=0sLhoHD|~=K55NT}KjZLvQu(skLg_O6tpg^s9g`67 zSzZ4e>Y)Kbl{x`N`6gj+xKh(JVGuDPXpgO5j`-^SEiFZo-Mj0^$m-Gx6?knbLj9Ly zzlp`V8YY)&myH-L-P>kgtbm#t5YNov$;I|(aB24xjW1CxT=$VD8)+80fn;+`a+NX{ zPtZ$()UvWRkTRtEZF@Q_u7|^G%NAFWXt*YXWEitCQykBG-m95chrh|KVgj;y!>fV+hHug#n`$!yd1A-FAI-bLe+*bI_#~d|Y0L7YACfI&9Pzqf z#`?H!;<@r?@7^u9~N5%w^ zU4r8U8Z{6t%sFQz`0c4|*`r#9nbe$8IWiIni-k^FSpUjrmtjp2p^OJ4noH!jOAOJQ z_?yGUwA}5HQ``l#zu{|)Nx`Ykx*P?}lD!Lcu5l2w!e%l$oo z2SWnN>$iC{n>PQO#i<~!dq#I?d*T!1<1?ErnXusq36yaM_v}z;&=pIv?gQ7y$EfdI zm#;1nah^9%luNsY)8cL@SML{R_sdet=x?tB zNYg{DJzW`eCrVs>?B|S*H(%>j#!p*k6xj*mFXJy0x3^xuGgTtHZ(x6Fo?m)Iyvpbc7u1n<)07AjFDW@Y2J#cDx{v1H#xWv5uWqdk(8cZ4$FAfGf8Rcw2ILxpULRzAr>od!Q-NuZtg7 zGTLIQIm-7>;C@W-UycZ8x-&lKCDW(^j81?whg@&S+964eq|K$I5A#siJ~YJ~crgDL zPD8v^^?n`K4rv?keS`PJNq#WKJ-q?%?Bn^6sOs%e_Gy=46s1`}eyW4!_%J(b4z`7> z30w4sUD_Yr^z$88k0lQgHOqa&SBVfCR_rZT-Hx^NWxaiO@ORyhcXpdcDbQO8xF>>i zcLxf@-4B_u?IlHk{-(<%2UeUD=y8$hxrYuxjsu0tZ*Er=eO84>Fzy_W7%TKsP zu7^K=SXpWZDj#a%b4E=W0Ciws;R1n>bLZ>J%RHy*fxN-~d0-9?M1ka4l0PG(Vjo|n zG?x?8(gABaYd(Kqq{w%jcs$85fF|c(! 
zh&4k?AVtPEmaS!H$vGWfb2U2e%chnO6j1EV4{Dl1%_8sbMFl^{buV2e@EVm^=WVMD zz_G`XybkQ*6It?Fa$aV(FY9s|aX!q(a~f0up3_w^)C!al$cxAI#oh8Z*ynI4I9fb7 z7K}Z$Gsc6RAUKn@e#=mkq5NL-Uey!dBv{d()^$xJ@Og0YY)#bHet(iC#Z*w?-F<_c zMb2DgU*f7*{CvM%o&n?EF5S$x!saJCZHtjR<~{O4z@A;ce8N=kK2H(EJn4or@ZtFl zdDJQQrXLHfywHq=Fk}WMg}j7c!TXsU=e8n_Dw)AUd>Ts9f8sL^>uR95-7&MEv!p#) zH{e&-)FbMII{s3hJ26$3)x9iasxIz0ME8F=mA?se`xMRwNRroU z2`w6){~wlUQm!fLAtPYx{Mf&a14{H zL;(4;gczG5%)kKUS2(MQM7sZBYnu?!X*u*!8V8{3bC0ZW62#Lo+91FYilFAqOADoZ zR?yZ1twIj*cj*~`9XyDT)6Bx)>O2-&cJy%}%?k${tlvYuDXD5|_2gKiI7)8Wc&$;q z&d8LEATXh|I;MU`8rpEdIr0!Z?Cso=o}pu9h)FI@Oq2nuR7TN^7*k`;&-CYv#)C{r zc9IYMRM(PS>HFQ$03Nbcw0S_h(P$M$kB=m_BF>iJwTgBsvOeN2W0>0gBPj;pJb#Y( z6>6a?jUc3pgYx%ahf(O%rVq<$T)@hoiBHjlIRO}iQ(Ba8tj-V$vFXe(+{S%T;n(e6yq|&ngD+y<+7DD98|$y1!vbnf<-l&! z%Sx}Wv<`TF!Zuuqbo1qk_8A$nbJ8K8V;M z%)>LJ)dZVE$ZV&lF4+HeBkT)FJwM9`Ch@ZUUA>H0JSFk0T7K5oqMfH!zzG&Gg}NzJ zD-1F$d6pB~SiTl=^Q@|nY~IxAIf5%dn@S8&v=?eFr=6Ezw;2r~YPQT^4GoRVKglM8 zflbnxy)QTLv!6VU#WMAt3OI$LkGN}-bXrrbH1QJUjlcCy?@v;fXz3^lw&LZ&W!*T? zPxq@dn%6Io&T2kEz)nRzoTZfbe!Nt1oXatwyE0PrNxy4xIfm_$oW}%CZdW=n<6Dq! zn8s@WP?Jh$6V67aQZlmJGIHP^5rY|+C=a^oHO1jNhzf1!ZzJF|}Dyc-l%vavQ)O=?2^+}vF=fS3*;!)Yhu3$J4+#XkG!f!Q! zH{S8LNJ^Kkr7}}zAY_yH=#c=5a@nPNf+|Q6G;F4KWzR^rijGMapa)@%y~FVa)+v-@ z%w?(2nVoaY+-nfcF#*lJaHgVf4!_tcLM7fO=cJGSrrKD$_(+ZU1oTc6b4&XiyB7{U zleHO#v!k@#8)^!eKt1w;V;lc@7uUFI@q-63_QY%#rOd9Q{lj?XR@J)F19^4)F}Pkr zWG7}ZyJ^`c)c3&~ehv&$Ao{k6Wp8zFDn=^(I^U?UPE(Nb87}cdI#jtE@l4hU_Q9FB z{TI;RI%~FzL<2Rde9ID8JN8xxQ}^KQRZBCtgEgq~*XoL`V3rldzkT>!PNPWX5D>v? z=!kZzHHp_nd-8X1GwS&x$SE6o)SbMt2O>*1F3-EEDZQ*yZOvW5Lv-wUD)S@ctBwOgVbku#?r8|Bz*I zBS??m3xO-=V)yhOCFlbQE2Z~z(CTvrU+rbdBXt${DZP>nU&+xU>?Xeg?Fb(wP~Gwt z1uA26Iq&MbkJ=xs6MbN@OLuRw&$nCMQMwMr-ckM1EETK3;#i=$`|QLSp3u!p*Gv!!Fm`&mFAxtF2pZ?5U)E3 zXnbAmG{KotfbArj`XFG;uL8Iu_2Q8?JX?w zl}oGgr+9`(%lRqp90FR&U%Bnl^93FGiZITzRw|Ywp1RL&bCrAA{#@^s`l-1V5w0Ow%l@%5u`T3Pa2;aw;b-}hCrXc?@bf^ws zov76$%xp8QvByaBO;NJ_U4B+&ToBf*J`TNxYBL~WW{MR(RJFf|>Dx*?hsHe~v#_p# zo>;Mzz`IEdV_h8Bl7sb2uggKUxEthGH8;JaF2-jUZa-`+8~WJo9KV02Twe^1r}t=1 zy?!v(fSZ5hA!5TZqT z*z-id8*Phk|2NZ=aNu|Gq}_mkG+k11kVu2ZHSlJ(Ut){KYG|IfedKP>6_R*EsHRPq zgZ!jBgSElEqf&!;GGL}38ZfV~6OgM73y=$8iR~_iJ}bhxA7{}i#OXvl_uGrt{Fg$} zcOU<*#is5ASOk)7h@ebm(7H)6&E@G7saPP2$rpK;aRZ(u%HFT zt%aY3R#r}~#BTiGVoJ5qp(e&l+M*z|t?h~BGtP$ub=CL-H9@8@Nuas$`RN~LU!2~u5UUe7vv`{jFSCZq2Y8gTWI&em@ivZOY9$5x zua3uTXCRv+w>B*X1CLGqo(18LP5ub{7x_cARIS<;7W#{Y0^7X#%08JVPqqaf4@|$c zd1Cnd+-;SE0sIQ(ihw-uDZ0$=>yjRs3o?BUmcHvwZ4^Hhpc}0-@;|nnFyX!dzCc9Zb3J&NrQn0uNfT+I!*X8j>UbcpPgpV0_oh6}}VW z_-C;u++hb0D{07{PWb0y`ok{&#*EUggW=j_XsE4uw7uy7ic*DX41A~3&0l`NW;+<} z?t%wvAFULizdV*+S6}G0?hEV z$K|z;))1^hhp_8~FaG(K|1yF9#zFr1o^utza5WsZuYI(tfM6V$u<~pBPq+W`cmDd1 zvl?Kyxm^j**F4(mmcZtvQWHGZZuGgE{PTuCtD4qb`ShDq^x)c+xrDdYgYOqc@^-xu zTJsBS|UXpR3Hxxm-JCF-tBBB^U%wE*8iDE${QdcbPa zP}ykCjkU|@DTC3f{5SG)*+5>7(u*aut=+F>g3)IF7pjOey};c6+r>F!Tm1pB@sEYX z22@Ru9ii&_rT&2lRo8qS>Vr|}FZZ3f-Os~6^t?XVlqJ@f^xJ7Va{qjUie`byQfSap4=EH2x?y#uY5vSlP^mZGwyoxC^D`BS zXQ{{8;KS?LEEen9)a3c{%e&^fssPz>VQy4Nv!e#;gyeTveQ3XD3-q4&?XKC`jMS9` zTDPxjRY+3w8covQG(2JJa0h6^Yw(`<_<&ioH|3YB#xXvq5`5S$mvqp>;?l31{nkPb zub+38U%S4BDLRdr!lh!cW#q^Y+te`)2`j2TV}#e&;eW~k$}A# zL1y$nRBPL?!Z_Am^1*7a@({$W8F%65>79dlMJ@EHg9R7(tfEw7I6l-lVOmPt}a!kmUoAkt#*=aOI84d=A?e_VcY<7uI80Ltr#6jb8j8IT-uO# zEK`wKaK-|9A@Q_0DbZYyb>DMHu`l40dG1=UqOC6t+BTBs=H_}{ory92lb!M_l<36T z(x3?FZC0=d-+y=b-MP5Txdnc3?U?uHfaOk?o@U<3VXyY~BVG5}MtT}mM|xPELoY_6 z?v1O(z|J)sG%J{ztt2M>O^y4i8%Ag#tw8PW5S7 z2+y`UMi8TZK<)8ZkiNuPHA~UcMo3M%jN)>?fUZn&XgCBO=y-i_NWF73PWMsuW%I`d zp<*2LupdcvU^I4|>5Z&v?X`@>X%tioQAKo)IqwMK+hxa{pl9#VJ!l5Qm?j*#Zh%&@ 
zXh+11b0HYx)-p8E{rzsE{c?6wucsmV2am%$5#WOeKwwFd-EtQF2zr8HXaHw5B$eQ+ zd1|M1>jVgttY3Bo+pHleK9vIqs1c|B2(nVKXMjs5k0qXxs&Fm#8zWnGW-!$(_*k1$ zDGc5hPY4YJ^1JUcdwADpIsR=H5R)idPg6|LYBh}|EoY^3P0+Csi{BmlhZey14&3L+ zhU=B-sEA4VoeZ>BtXb}!Oo*6w zA?CHqCYwW$SI}XzF|Es)Yxd?7n^v5qS(nFJI6R57ol^EZW zB|Ai|X3=@xxShEteU;%IRXv?mhK2kFV`f<~b`nOT!3O~{=P^+ zJZFC6#-KAJ7N75PDiFf%+3N3kfzdyI0K3Y5fUln({;0OTr`%qp2TbBIv`Jw!qWxz7b@89Mb)6< z=A_R})PyJV7=^Bh7yKgP@u741<^C(vJN7NNNWEhrIU{4SwbafK;el*DoH}=;w%xcs zWPU5DDan!?wv=l|+D-^QI0i3Unv-bwS+KlP@n4^YmHpz04s!30Z{Zk%S-!cV-r+e0Ibds(^U4+lx zf}IfPb$^OOe7Z4zzG1GLWi~>UKD3eU#acFGYJ>SsQgN0Va&&wX=7J95G_MPto8I{y z)B8tkk(Mi@AzcnKxG%deYy(Bnse%~@s`bvQs!e{;kF%P{=w)WXK;_%nwz<5(^?lEE zfwO!p`)r*4+hq3{Bgj*cWrp(UPJZo%h89gvZB?uO}bF2*xa3w#+o49v-VXh614v zy%HBy^0-?^JV)8tHlORXe)Qdxqi4QU@aV^y}GBYZYu~ z*vSv={$oYgzDW*cutNN7K0>wJ-B^iuEu}tk0wFj>v zmMY37h>vHUmlLp@5wrIj+m8eOc~U2TGSN(j0Bjrs4=i;_YYz5WMe8j)fz^x<1JfWX;a)9Sx5E)9z1vCnX9u9UApdw?7g6MAO9zVKI9eXfNl^%R7%&%i%&}=pmT=5I0z3IhbAU2z1718{ zD_(_i>6|HZ>#gWRQIkfmRy8o&}ekXZ*ZI}wkj`6^xzOUTRyrUf{ zVlegtQD-py?XipQ8QO&3JLepO>BI_WlV5#bY8n9oDxTI$Il94t3QhPc(=i*Y3i=bR~VtGaRYp652 zycWDb6m6PJ*P&PB2H+BYOZk}vF5G_7rs+3*>s2R(Xt|w{(qnpYXV2bA$Wadk1 zQ$IpQRPVaKsU81zoD+gWB<})K^M}Q_F`7d#OuDw-uL-|e2uFF>;bP5f*hBns#tQU75+p}M3xtjR zQkyry51o}XOZhtCXZ2YT;h2xD#=^@<0xd}!YFe8Zgc6DaK z2HQo_TW2GN@x61~0rav%*=1wcX{f}x)8G^DB37V_Qrzg6{e1K=_eSL&5%Mv5d$XzP z)-C{<`1B`)&^+R}6ySN)o_$(@_~H0Hdq?0M`XXi{1FF%W_`F|45PGWy@Tfm(G;ZN! zVEZ_R_@yr=O1wV6OYcYEBGp69^G9=p%X&D1^JhD!dc<70W~JYm zhwT-DIre4A zd~rzpDIm~2{whF>i(%3MQ}9l4DHT9NxN$O6J^FW!;y->SSY5I^_KnI^Ofif9uFwmJ z=v`sxMT6u-le&ms&|>Svf*?yM6q<7lMjZiWdw!VwB1M7|In!`N)qPh@I?Uel9cwtZ zVm0=4V$0VJnpw7xea@Gw#Xz~{sxlnN7ctML19y+uuyQxv2XCIeYaQ<}*jDK_rsC@^ zadTaMIpgdL6}MZgAl4+hP_iZ_Iuqnt4(znq;Zb^Iq3FSk?; z)$5s=oD6rWONpHmKsg?G6=P5X6!_}^PfWU=bidN>+iI5XYwQoC%y40pWGzzSFnnjD zXWn8jb_rBplaV7GJxwWd4g4YW!?+OlX&Dyeh-6WZ^|Nr-a-5-tgP&t!Z+6Tpuv>(6B%mj$IR?jvNNsAbkWD- zXzPp7uh&$`06?)mEPnq*?w|{xndkrX9-x4%9Pg_F9r@_E^(ONM`EhuHux0G);*@qV zoV?899X2)q+=Q=tOJoz)B;h_|v0t5$iA8xFTTp|`2B`S!s=|PDPykH?Mei1p+9z|u zviP3R1Z$>KT6dhJK$(SYX$c)g9LFpt-21}Rpg^h?cO1E-$yO zEIk7IO9SyS+J~{;RV`|5`34oT(@_dGc8^-LXAwS!S{5HwUks89G=?)$C{cB25OS4; z=9>k8q|MsRW}<4M!7;G9-*DWywY5e?aX28>MdeU)Ke(-}?6l7-Z)S?iW_eGiLEN0SZovp8&^UU+lU)gQDB zc8WSb;O(o1nv;3_G^Gy#Fj*M5C)23M@ZunR-o?ze!{iJ0@3dIHl)9t+a|kNIhKkrj zZ>|Dhrj8%9##4b7;eunT8biz;#XJnURB_S-WKwtfit9jIyg}PSO$58QS z=pR@Mt$@VzaaKJZ?PPd@88!P(N7!#;Z%&~bx+rK@KZ=C(#6Xt8A2LDJ`42LJEcuc_ zfsdg8fDzvMeSI_mS_DX@PJol6jLP6*qh7@4hADe$Q5zQ~F#=M`6=euh#S%Dr9YO?& zTdhSlW7N_4ExwSB?>Ke=!Y*OghenJ5-{|}m*jUy=p|ccTc!8DNxsj2g`tjum|19N{ zXCoEALE2lbQFrU^aQvg}#S$=XHR_pxCCurhT($|VQDrS*Nb~U4WvP=fXIHF;5B)^y zo=~F>?tQ#m!~kX*^ue``x8tiHy25G%&FmOEU1PvrE<%MYj~M~L7dYlzD4*0V%Z-w~ zfpuE<{*340(97+Y$o>7>VZp1_?(-UsEB8rSJ#1jSYbMN2H-!?<1Fi=hH$T>UMhqmDmXizO#A(1#_S%3TwG_e~cFegl-JY+YGuhcH zKKA7z{Ruk1yx_8pMc^R@Fa)T;0q88E4IXSk(s!yjqmxy*y|UfeS6ugA&S(j^)t(wvF!=YC&dz&C$jY;1$CYP01I^Me+q5ITZO7y+HEqu+ zA3&#RXisP5MTI{veM?Z^^h`|p~+jxMOV|E~r9n|1x$Vxb5U z{G;Y80|^0ha|nVTF0L>M7Q)EWsBUEap)R4d1kv zD%La-!qx1L%=M=^pH%f|xKTfSgAtIv>G->!@!yKX>uG9M_H~Yw?Q<}8nX?sxw3}et zUY@zn>?R(mxYlvozv`e3w4N`V*oJk+KX8ZC`#=NPx2<_Ob9;*obb?ba0Da(S+r{lS zkoAFXE?!i)HevfrifFcB(Wm0=$dv!%fwEhxSwV|#@qfXKzuoBh-v|2N^uX9J1^@r^ zK-DTkpo6;65{g4vUfY51|FHqOopu~sve3&ow+#sH-}a2x?%)vX9@tRPqoi;BDu3yO zIO($qHL|{)lE2F>>Sr6Ooy?ut4vGKYC>Zf<%dcV0+3(vwwQN(W{=2dw+y1Nj@_`f7 z3A)a|eQ&dVzBk0&na?E+Olvw83p1bwVD&UZg_s(!Vs|5pt(c6}v z=V|W?Nl~%eBq{}m0GBEJN_kS=MSxj@@^13QkU1;m0_&CJ5n4(UmFvM}8*dlxSKf_g0-TDiF%rZg3U6p9+X|0>-{t zs|gD3kD20cvhKuT&Rc>PrNB+GfU_AeFw-Skn8td#Y7}+@T2&$(cFKM^kw;jsJ%)*@W 
zG|m7}?JScdrR#sTs5`s)=ykKI%+E6t)+wMZEN?HnWz+~ew3yO>eUHP0mRp$1W;Li7Rx#<4O1T6nLC9!=YJP)?z4%tc0}8$=5*0(U8aKm^j}>3;WY zAZp>;$nKXevj1LyJ|*LbNjFSI?&;EOK`IO+8tz(8I|lS8!ct*l-&B)s|GA&-Xyr>j zsH-M}wAqXrSQ^f?iyZ4s#bS}IuE!cz5By@LhI%5J$Q4s?b5a{M}8Q9db0iVI`j3GX4 z;E)C6!j%APX@BwWYXA{r%e2_~-CIx(>Ns!vo^ee-@PZ|Wg?*4nM$*A0-`-)%tM&+K z7LqaJ@;VOC%hv_4jGyaas+|T1`1n=VeZ$Sue{y@UDJscqJkO1}b)nOGyv*JhVDzLw z=>)4hEs`63D#Tl1aKaJOUPOMk^BlK34Oqb#`oY(H_9hW_A zws1`i11r7RDljBHRG$H_PSsBH4bf&V!zF|1E-abn@T)^_WP!x#`gE{tu zz9(>VtHz+_c(@FXOJ6ZEZj?HREhi;eBx~dAsD!PT;rBM6;@r6HR0R(-QlGT%>$@MYS;+H)GEMIzb5wk;3cfM+48ubYQY+JOu1daMuIWkk*W5!}YTwl&-0QHd^hdC9Nv}} z=s{}yTmmLF^u+t|e(1XO0*UmXjv^RAb<9^Ew9MYR%*~dG^)}DjS-$l_HysO20c$2t z8ag3ahr7cv8PKSX7Zy-ShXnMjy~?UYO;rI3){}vXmC9w8pd0)K(X?B!%zoBWi!nH+ z^#=xzT%H0XQv%KdT%Z!e6gG4X(ae%ZIk;FGsov~^)X#JUjhK`yC|$4ZY9oJK5Y{rd zY#PZ%=N(B@8!>L-6|!m&Yg}e2?ifLq|5OrJRl7*d^tVMOjA{!o`y?5cq>24DzEy#m zd@k@_ReYSB9c62^-9T?v-z9%uhz=(;SBoxt4tOq*2<#)N51M)7Mb)tBjpOw(@s&~? z)T2mh$&V_jsxTMlF|luf07I#p*sQ|_`s2T^O99N0n)P=)gawqp*eKAUFpTMKI;2B{ zQCA6{ZMdSy<&-u!%k2rs4nX;x)-}RV!3AbRa5i6xI+=hPd~3#XEH%!Y5a((fs=o{m z7);!HYFaV7hQ}!iZfC6Yg_@pmnbgg7_np(h)IFmy5KYuq2(>smcfch4J+=IYMd~A0 zn-7&IeTRiSg_Y{SLdFrl=r*1LKZY91yR7RZ|K#hzH2IPdF8A$ac8@j-mDk2^0rw1v z3FulmW1#GKbD8B~QX{*+r@8v<&?#4D$w5MGWtR?|nh}VJc71j?4CF9oS7BVY3i<>bB6}8sSc43hsHSce= zsgtkHd|Bjt<_o1JuhUBoL*HG#bFM|Wxm}&?9>k75&|&Y6v198I(52xCVoue7-|CmT zM!as_=M`09)RiQ;iee8ab=4FMu#u3?TP?RM+a{xMs|O|^QdlJ^0sPd<*KNpc8GA2X zzUz5xDePB}nv97%h+_lbC*dva=rc{4Lr7qt6d)wdR$AjYlq&Ocu?NeVNABsVM(OekW zh5Y;$u=9PUGnz9hk$#reH{fHK4Z?m_?{OpxXi%iHDK<3&vhjR2lR-V8mHD@X=aH}M zq%^!bkvEIZHV*DPo6V9RHu(by-AI=yA}%9@kJ@m1od6qDfofwM_!DJhH2N6~N3?`_ z44bW#@oLTB6gKyZ{77+aJR=V+IGTI)`ZhA1U0B`J88IyXH|ie_lDmZa=fy4Fi8dJ- zS0EMzngN3}PfsIqrCutQV|IDppz8X1=*6rW-`kUT+K1qJ5~H_7XHFw5cZR1Xe8BMK z>Q{(*^w)45a*wqVVNTgrcmW#dt&32$e?X(lCq<9N8C@a9xN3&|@+C@)i~`VRkWy}3 zv(HxOVW@;nt!Bl$fqLgjpGEQSo77%b7hjovbSbA74 z4_s*P6;{ay)7C9gV$K!`kCP@&MC(@olc$LI*fD_j2I4j&iZ@%LYf#!m0|W zCXrc0YhFilQ|4tQ48zP+>HCCoUvWU8 zIVifuu1*-~nJ($bye4aA6?8Y&j7A1y!c>B5W8vfL#dbiLQG*7Y!KrQy z#g5H0qfx(bYvGTV$qvo7MqTHgo-*nIxOXpdve0I61}cQ9FNDOm?}JxGBQ9$TQ^O+f zKe!EdF!83Z=>sQGH#TT2cD+2|N&22OR-@|Z$Fr&MnHMy#CU?zD-U!fHjpHajth1_T z;Bp_P95xAz9PpMnk$bv2N8A#~mI0^t526;Pc}dWF)hbxC;d;#$rv3fo&L106=$q@M z{0J$dSnfy?-wvjRxcrO}-d?2=FuN+cT!1UPT+kWaS-x^PnINkQm9R?T96tAxSy!z2 z1YHU`xQI)uendrPH{mgp$5lPlYNP_kA2&Uh%~-vs%_2!{6_<^Uv{?-z^Q%Xx$XX$* zPfzT?T5rhVjO-`EI~dL|ngdMbDtW(2@<;vLr(ujJSHgVfpIjDSV620T&(oZY;T59~ zZ!2NF|DTCbS${{2N_A!s9Gfv*xgI;O`UaNoFU=*$k4a0ZeBxX@=KyCF&Tq^nU6DB` z)Gf`<$hrN|W_6MpS6IPu@1?kJJ*<;m0+@PwDuI0&!Xt7@mn}*a^%Baju^NZ6ec*O{ zMaadDj|FkLHk4s21yB@R^x+HH3WMUP70rjGOMj=0!&4 zE(*v&4A`_|FMB>Mh^x}sK=+9SxVLr9vWYawef3BqBC`t(zuMfGb6*{cPFKAKyPKVCgNN92*yW@GLAg;B`00kUE1jqWKXyqd1 zw6Hy@n?Le{?$o{M9x>rC4}<~DzyAS-{&$F`D^GTHDs;pJ5DWcIW?Q;HKFk(V(mS+> zz4Uz@;AuaGd*vw)j3|@)k!qDZ{22uXmf$~0R(Tp#fNl;?Dr&yubEfi`sek>RFx0Q~ zNB&w{!C5BhZ;UjbDnDPE4m7F}$y;X5B^v!%>v*cd z&TI6OW0H)LiWpt7b#WConrrj~LOhW+&>x8*3m&{Is1GnHqQVaX#CW*qqvcNX>Rq;RWS%I+i($V&w%Xhf#rG#&V>1<9f38Pp~L$AQf{Pbt-!92&!@FE+; zQI(CZ++q|X?0#Eq%)q&$tZMNtE%8BD?bVU*v}i8=Y4kFg5XPpuXRu3T6|ovA^XeQK z&FMBw|NG_%yRfhC0c%YfK}1SSj$*pJR7!f9&cdYvM z$CE96Anb{vF(X;}z#D@4Z#Po*JkVuNFll@MMe}?>9QdcW5)WYZT=h74j+Y+Fn6}9m zhH-c^ZfCirNowQxN3ezR{GLrCOa006g;&=Zetgg20QRB01^Yf1>>ms2awq?!t#>A* z=-*GVj1KV1Yc~)GCf2{qPN=PzgOjd?knk4PGIE^(ZdSCG4elwUWjGYS<#Q10Ti{&R zRmC1rko8rhQWI3Vzjx8{Ygo)OKqaXg7GPWeYt3!?5b$C;ZKLfbi6=MWFtkeZ7s!0o zLp@(pmJay2v5Ceewo$Bh-1o%`FQMhx=5WG^b0j<+EPGW$gR&=Avw`u}Kd<*h^Fg<3 zjgy`xvcr=2H&0<@Z2Ia_Vcx>0SKRYfShppe2c@hg2n7`ntvgJ@qnuCigKzpRgf*9w 
zKNA8`@QcK}cxYbhc5E1O&lf2X%rkf&IAcktg8-fCIca~=RNVx07cp#OBEGG;e{e8H zY%&IIwO+0q#2Ij>?retS(x&iBm%^YfpYvh!b8Id>`gkzSf#23>G0bNxJ2=Mx?M7h6 zwX^3;T76UQ-}A4Z1J3c4O>Wi|CB|mBqL~pTGnfL*7Z~uT$SDh#xO7HeRIj!DvgL43 z1>R35zG!0uuokJZnw2Nk1{Zsjaw#UYN#)`X;H>*Vq9W|*rQ~XIfv(_Nmw$Mv z*|(#MD4pBgjBwCUP_2j%P)r@NGH0%SY|PjoMd4M7uE<9qc7LwiLqRQbHb?sum{(>w zo3dy~&J0jvn~m8YtYs_k^*5;36%`i3=ATjBJZqNzgiT{TU)&FLhBKV(H%93k4m(L~ zu?9J~S`-D@)DXs+=%M_4-?t_9b4o1O=MKN{h{c<}#2V|(aRAD67 zZk5zPfvJkCT+1pp3{*GPRsnD)>iCa>__WJOUjyJ}*`~r$2NHe1f4I1Vc=ioAR+Dp! zQPBK^N@6i%s7!+_Z5r#X`s=*!L;RmJL8yi2R6YTbyGzCoc z{Y4BLJ1+U}Oz1psr;vyZ z$1M%k1c2P+c5QU8jgN^Q3&wXX0{o&xb*-W|VI7BSvanW`WDr{I{9738n)mgC*JpqWcCrv`I#3Ykre4A^SNm`A5o#!_1p z1UC0v=273W)E~;Vwon2Dko2Z37REy};#_~fEytc-07ei!GWXz2UQLBHKwqq+VW1LW zVE;K}Y}Opu#li;^EW|q&9{?hr6ZKYn#xk|FdF;}0uRsof2@E&Zv#r4~FYDTIag$sz za^gED8-pclU)@rxtE-ZbcSAcjcRtT*H`tLsK#7f9ncD}rvVf# zwXfl1L@-+ODDt2-JZNSE~UBOSC^A6X%uQ|T~0KT~kgJlzedAofZ^Z_+^EueHDQtj8+T3Z=lc7krNg z+(I1fCt2W5HoF@9DB<5<aspXVt(GVF_VRp)Zzf?o1bd zjj)^LdD9RCp!*M?s_Gteo=s1(FrMD?AG&s4L%SBkL`HzB9_Y{5*wG*3 zkIyOsm*_&MvW_Wo-Le(Qgpli~1An$aJ<^zYK<-|Vb9 zyli-6)q^}nL9k_&oo@b8hr=FYi~Ivm_MzSbAvE-r0Dbz)sw#u}&(yMbj$f@Z z$f`Dx*ZKXV@~}c5hxcn5N8#M8zd}}DV#7pZnDcVH)s^22jJan3lf3^W8vQq6@PCO$|8txE z{|(XT|1d7#obh|@f6?-~{n@;Ij6Y;7iOsib*s*_;7_l=``on*-q1Z3wo(7KHvAcTP z=l7M11HZWhQZo6A$T;_bF1K+ zZYN1!V=_RR5w5RG}WkX@Ro_2Gw9I}FCDS-ZW zDE{YL-Zk(IWq_`7aU6hZsn?oriAPU2=lkjh+oPaXfTx|>GUz$g0J?1BxR85{Fei?O zpT6JM5gWBok$#R&$&XvOjOqp^S_~@-y>oP9Nc98Tqn^qfV+(xz-_7mn%OBtIyuqk{ z@42|bHn;Ock{jjlmK{Ad-M>PthnbZWsXJgAmDh3P4U^z3tWeoWS=|7uM!{f|y7f=J z5jw*DqpcZK_OLpE4aT;2os474o#@iJy)N9O$H2$Cf;KmVl~v!Lz*5w*oZLs)_=^c_ z)i&VaOlTP}H!frQ5U~zVsi|sY7ASUqGH`AZ$_24U`!f{72D7+P$Ww!Bx}5N};TN90Ze zqc>Ung?hRs5PeO1xbAAoYi4~dajow9ZU1~e45pU0?)&V*_Wq@7})Z4Xx^{~ zMVD>eVhiVZ_1jI}w;D}Gi_%m!*F)C{!peyg*V`>pl1DMkV}3d{KvQtKHX$EvQF8`5 zp~Rh0tvet;EG#f5Hk{gXZ5sQy3VL`o6xJ zpzQB~lxS+Mf*O63qL~X>bNqSiOTRF_Hv{9j{#E6GegcNeTIK5duH1KE1~w9VssxgU zuCrU{wJ59>XEg?Ry7mB)+aI92>LIiaT}%Xs_I;)XJ9s-dWkgXPx@f`8R+|Dq->xKu z^BQ+SSPCQ|3bZ=vC>lt46u~ETGiYUj5_OXt1A^aT_P;nTAjP!>m6H@3l^@JRTgNd; zx|Gj1$-K7@Y5`N_(YFsQNBXjQBik`XESNzkLd0Pksa^2EpHNZNx0&aw8r{gc>(B?q zhy&Bjn`gbi02PtVR!dkAwki9e`*5cKB!fkumniit4 za?BIe%0jOk_tBknA1PN0N7dksgo+Hr);|yJqh+ho0CJu(o&02ox2n=me$+gzV3-yy&QU5xM?!Stgoza3L+K*uor!~nu;Q=W2J3%r@4+57dFKyOlnq&Cnl`iCCY?=DD@d~oL5 z#`=xLhs|gIGWwHoG$iZ;m?w^!6&{oCE_d`! 
z`AqmP5~PKPZPt}z@SH>Jnz zADBrX22fuBBp#nW?zy-zO72@~v7NJA7A{MwYcLGXC&6MZjdBGYwWBqxa%s(J1Q0Pd z0(f?CY83AnNXy<65Fn?_Oq^Z}eLfU5xp)ls<)O;0qZ;Yw&SE}Z)TmkKYhzISe<*dh zJ&~(zbsIW}qO1*fbl_%{5^VRP>q-syn8+We#uzadZ`>fq``<_q`zpTR9A7cFc}j0L z_^{d^J9cg3OYjvA?dq@fN*}CEk_XlN^-j7#OsXrtk$k)ZEHbGl?+_1l3|5#MtS#v- z48zJ3KPz$ibj0yg^7bP0D5wih7z^`@QVidmJK(xTLa7y_gj5+euhYo5mtTSdQ$r?5%~9X$dbS|VfuNKVv%t^kphnj8H`JgoEDJj`Ct>w6z8 zMEd{qQuLe4j`Z!_FP*ODWG#BqRD;2&Rcg)Z9cU2%vz{2=9Mr*4(zPjEBJngmr|8ek zg|3gtT+G1C{IN1kx0-BZBEF`i)f0X~*8OGB(b)S50d~_!P*!!=;|j#!R6Wt8ZOaIk z1QIY()u`EB7)Gxve>yv+?y|pLXL*#R8m1@A^R@uZ&w}`iWHE)Ne3jS zw)5O~?D9aUVRmD2x)XLE+6aaQFZnGe=__%4$8>rsLo8AYXX_rw%F z#nbcS_06gTtum}bGJLr>X4HF8DAThiFA??4eQi&}r<+DgeA=z=d!1g|R333A!4({h zP7ejv*H%kd*Y?(7IBsmzD`lZqpFS)PS{h7RA0Z3g;m#i1Vg&cEs4ooJ)bM3yMZl zXqDzJLMonj$8%i$CVtYh3SkhL9kS>>RS_%zTAz-I&L1XNMr4-j@6D~48KQqR8pV9w zLH(j_@^kQ7d3THw;r(t#4WG9?=Ms_o_*T`!$W~{b>CT-u1l^zg!LjG`pI;7}1hWXE zDd+$FlX;`{42|ZwdT;OFJ+I){J?E`b>klRmmU18bJXlv^7ZRf43N6U9lN%b7`Y;yl z42F4^z5A@R>ky+_)=lI{9+F$=`dLanwUjq|3mzl)*=M6&8~IuJn)qQ`)u8p~2X@Sp z(<_!hmspfG7hY3vxeL#Up;Dwwp|zo$3r_V%2*gd>(aA$p2zNk_Av5)-%WUXSD!I zZ=TM_=b}%JQTD2%qoM?STJg8jRX_g9z+truLK`m_B(Dp3ZzX4$L_%f?V?yhzEex*5N)j>y*!vW$eVA1NPf`;Hp%1G zt9U=)iwHp3wexu{79yrIbYr4jaTRZ)odpb+kG+|#d5iU(qf3i_@{EQ^=hZmll%yc% z`OEX>y*wW!h#8jM4IN>TrJ6}hewi75>4CQB2B*!F4$Pk$moq#F>(8d|i}Ob+Asi%d zJA*f+qg|r7Ir3V49UhDkD4Y6E;la`#p(C@Ic|Fcfcgw3r@Ut?aAL5bM{3lOd$bj|V zx1}Iu;>x`&nkovT#Bu#n=X^P5KFb$9ufCWxSzjNoh04OD+$~ly;yKHx;k-zg?}jW+dOt@KOqs==(2YDi5KxWd znXe8xoDn=3-Tt#d)+=^&I{4w{Hw4qxX()?pj!w*#EKg@uXT>{DCJ%%#-NPhBJ0BSbCbY(Jd>c?9*t3q0@P#GZ_N(lCV5Df3~!&n>)r-Hhxr!U)iknC^8QKNG+=L(0JfD`2Y0W#!T$8ja0?EQhtdgvH z%Yz%-1#BU>%v~I<2vtO(%KE6!-uz6rkVV&w_+z1pH~PgRIBjtyw?=XwVaL;uh&0K` z03%&3RAIT^9|75BaTA=0&NWQ57tS=9C;*mq*Yc2=<(hIK}(keVxgdG^$pXXcoGc~H7RGyJ-;dd(O0#d{<3Q#T1OPy9G7fC-_h zFL=KDXMFpQOZGP}d>G-VIlUw7$a#wFovGoh^8h^yi741;!(AcwE`4mt$sS4xdhc&q`p3?dNlwUw9zY!{4wU? zu0m~(lMQK;y%I#INh*;jIN!;-`rr-A$=t|Te2*LI_6^qN?a2Xl&= z(7BKiyp;Z^d|g1Dlrd09~aK3so!&)`t%8Cg8-R1mW{oCnQ z&OKEIr2VlEdkt#T0yjLlFwRBL*M|h3o0g9h`UyXHy4bM0_w{ZJE~tRde5RkDgxdSWp5Xr?ouE9pV2ql!hmQ*u$FG8EP3cpxz zXxFfmG?%wHe@yuMwy?t8h#N$WeVHw+dgb=~^x7ajQ;j%u@%e7AT;FusHaE`{295UxA&zT6GbXOteYQ)Ryhkh!ru%aB*0$fIh)4}Nl&sght5Vz~aDlF_f081u_K)~3DILXTC|Vc0SK1>x z>lB{$YOvPkwVa$l{&pjVaY(F|}jj&l1w} zDIVsXh8TqNQtGw9U$C~D@e73g6^WPs%4vTi<^w;XD0&!3x_3>@P8qEVE%^0i`uYPf z%+7HJ$9%ayAZODVN;&BVp)6@v=XQbFP-^ zW!$USDIS-}3$vDj=$T4ZAfQ~_?Oc(@t#iUA_T|oaAOES%-1&vd+x&+iN(DwavBOrI z^g{{LF20M}ktxxxx~hk)1jllsHj<14D6vf&xrv{?L~1zuq$8dPUvlTR`l^*XH@^}e z?Kk2#-Z9v>!Ze&6qUS(n%`w!neVLzjV-1o~t+h9HRp*FOJ95{F&`QH_+Q@~6A=3g6 zM9*aHa_cC{(SAQE|Ez9pX@`r`u>5AdFceVJ zpBk8OGi0gU0MmNmZk4%SP9Wt&Y*HCH1KK23u2BB@!R1KgJS8M;rY#*TAFoDca7DEA zy-b)6Zg84_UZdKk8t$Kv@xIPC0;%n5U`dA;JXiIZC(dlS#v#Gp z{ZPGf@uQJI(S{Kt(q`SpFGzyX*5Y0iX8Hb5Cru(-`<(x=c^QLh8{sioS@Xv;%b^YHd-V2-XP1k1s);{c6BpV61c9Ztc7S& zRBDP`k5SJ&?R{~vUUJ!8ceinnYKGRM=I&8#e5rGS=P17)$F~Aj+fbqH$E~h?E6YAl zRBgXspXuIlnS)s1IFrbtHl6bKz8fgYXQvjBSCe7a;O{;4zM+?AeeZsggDEbrJX0c{ zSdJ9tTKYdZcz=d@Z4Pt02KleG%In9iycaKFrtzz4I6 zTQ$a{m2B|4C?r}oCxvVmX{`y3h}`D0eG=ULkgj~##ddQS( zL;ddY1szIFxs-2r{ZjKlWwVO`D0rR$`4~Vs5T)$DrU4~kZ{wVVSGA1Vjz(v>Ib870 zJIXze^l5xdLRjFm*RUDSNuykPJH6LOGZtZOs{}{F!DDbxPUt)C~wlW$?l4&u! 
zKZgbfy0nkF%B(N|ysNcJ6Mgj?7Cka_8&pG-IDtjI{Jb|G1PK{*6(%r*BbLz%l23hu zT=03D-|`Z)Bgp-USf2wTlYSKa=)kN8#T=yd_XQb^IT=YVOKC1|pI>uP({;QjqN3{M z63F~Q)feGeoSR=nkM=d~igNZ=q6}=bwX^^bK?dffp41oAE`$B~4hVVcAt!Sf$j zBYLhGH(PHuW8>bhHFhZyi=CbK=k7|5P7RM=pV?DCd>WfRxQs2#iuWfdyOenLs`=Ir zPlSt~Oh~xqQau{2l#!}9Yus#11=rdkcx@4`nj;uvCr;xsicXgNC2_=7eSI0^LDxur zs<+FB`rV7*o}5>8haC`u@h|?Smie!*J~g)^MR}vMnVvf7o~@qib3Gqq!+9M8deDu9 z`jASP%A5{GP651g#T5gPz|JMranJ|yKjFbpYdI*x|JI&qU{U*70kua9jitZRh6~bf z{8XZ3#-&n^1oyY6X|whRRYFFPU>Rne%jv4&-C)h$*CwODZPXH6cFsfR;RScZvpH`I zigwB))!59m<@xh97??!w_&HW&$m?(|CmrinVL!VIg4aDButA@f`!lr&YIl8np3kKT z!qH6i5?5dIha+2tGNJubl-s&0l!emOXsC<>rI{!&q2G*zX>(;)G1bOrU zi0U~zZ%NL5Pd#;wmmFRLi<|h4BSFsfT9y?uh#I-Nu4w=7K(en#eyw>vuzYa)KPP(n z*Mqwop$6BV9Zz{$xWCMoKG(Lf+`r)ZNBt!k7vsl+C(p7b%rHpb;sw+Ke22}{dc3^SzSuex3UR1al-Tw zzR(FKb9fjP#BlQm!uK_JzfY;*RxHtcfihv~C-2fTGPHr|n{@Zd zrq!?S9Z^cY%by3qa4VpvEW)5uVt`@blsmk=-;UHtmmimRON_INk+gbodUO}+;T`TB z!oee_E_uy$pV8LV-aDe4TdDN#qeu%<4UknxgbD`y3$yswcXxL%;n#eKA(ny;!b%qd zZvGMclwj(xT0p>`SsF%w!#mnbBt7 zx4{@wk==N*%wP<^dwQN8KCk+GzOP?@osQYgeeU~uU+?R>&N%`~2GsJQU6k8XhE&afOxt&NXFrGTexMlx#J2{GnJV#+oV5hOx68C? z!7{wrm#6%S!_BIc=i{9@Nl9x@_8&b0jySrwRm*aDVbY0x2G<|Onvs8Nw!u0Q?B7>b z4%RPr>Ha$Xcnpwi?BB%+52T~avLwNKJ|Df5b>RlTkJDMdsZbjSL(y_%W6S0d2bVb6 zL>u98qE%wg5r^Vlu^-sMW2zIn>eP}fEs%V|wt7>se*iQT7eH?S_i{|08pH*h59Q=e zwxhSWpmCC*-&SF2E|0yUX7CoRz26~U_vORGEWf#tjtVdDivhRS>}qi$5xnLYw-l}- zB7VVttWh5Ky+*L3z*3d)RrXuZ@_0^>=eKk1Qdg|Knw8Bb#Pg7QXs$^M;lC`}!xE!!f^hR{4_#RU)TFDs?FHdj6eVrzqP* zBNJzsF{Q`Ut-cFy25)rvJlHNRM}4wa`46rZUw zvrok>>Wv&eE*=>uxhXy!ZI_;W(Z8fkvhAw!2r+E6uA1Q zIBVYou>Rk1EMJU2_HIS4Ll7p1o4=9(9+A86aOCQYL=KzA&&dgp5}hx#F9o~29CgeXqzZcmd@Zwzf4d_NBA5}-u!?sQ67o5Lc6p4 zT7l*DjPabblbffdE~Xyr8o|!sVF+0c)3g% z44lQb5{!cDo-ixh>HbpB#-kY4O1~g(tp*8_>>FxJ%{EGsR3&@I6i?ugl<`iDmDCe^ z`Ui#4=EahS!NW#cwPH*QEc$LL)@Y<%KL_L=BSxD| z&;TXQbAdYZG=^XVSra?AVWyrq&Tf>b6qTX>B=Mb3w+(t8fCY!c+Fu`bJ1#7zH&urO zd}DxiNweg{huanwiL6Ne8`1OJCv3plleNKHc^~?wXYg>hh{{~MU&Wg4zQ&62PBuBh zil^Ufy;O=D^R9WycD1*~E=0LGiF;7geaMIAB-NzYZ_2i`&+kGnw_Z(4eG9Ozuh8zH z6G{ARc%7LI!lA*RP8Hf`k4`uMx$_bLaLx0erEhUua}WjIuRGWc#g4fRoRE5rH$y`* z2r>dHu45XEIPml{t|Acy`!KFxdF#p&cLaeQH(r;$NBj)OmOdB!YCHAILy`TAfH82s z_ahXx{nmrpD06w9(Xo@7l5sk7q2+RZ6j^fA{G(eBM}KWgbn5!}-IR!oju!J>SehZM zX`N0l=ZrvGRdhTZf$X_%M%ZlxT9xDGrOw^a=VFiC->*>%@OLja5@lYTZEjcv!TEa* z`i(w9sZ--#9JQ20EYC;1xt5pMJ=l73?n3V6R}Z-2dvtEWJNTZ02Ti*|RBX&t%KRcu z&W1Jb!ghDZ!30yhpoy9RWq@;?_U3a+i9nsQ$vVeZLF;$XH}h|C?g-LZ$~rGs@liTa z3sG6D?eSU135A8v_|Be&_ha1MC;E`yv2e}WN4!#rZ~P0U)zZ9L)Lu5H*jejZpv?gu zXP%Ff8?hLBw&}~=wS@=wK+QQfH`3gk=R!nD@|aTO)q}VzcN+;9xQ~L{dHvB&k55eV zpUBOabLn~=#Z|-vfD)QGVRR5aaQh)w*@FM_UDF;UY`c@&@&)InO}6zsIFuTOBNmzWI`{+jU}~qL|FxKhbPRuJlp#tGtOH6g_k5H>GKv z7U*mWNj~sacet>3WM#_BSI^~n@hiy|C`Z3kOR25o&&5*B#b;2my9THNp?l2oukW#c z^=?Fk4A$k9>q#a81UpH$}GZ*!fT1dHYmY zQ~j{bZeSLNO5@lMff;(B0@R>y05yI3$DK#ML~{n7jUHI==JSR7uQBOaPOYh5eLjSs zCh~PZ3xtVGUVVM(&rvRp$~Od~16Cd(WIo)D_cFQP!Ax>{EQepbS==4_g&7@?<%ekP z&(tkar34m3gQ%HA8}|p`KEL4k;bVXDt9!pL&M0jr7I3ytKJK+FbG_~KgkjN=tY)%0 z8%MA|Mii7$f)a5JrRukWarg-$TBIvS*cFhHe0%}JXR;@DWod3w4y=U?9%y^U{Uxml zP>S^m=0`&Z_q|fBvzO|VGKWS6)khyfV2@apzctxUl@!g{yAhLr3p$Z)>?(sbLLVaA zcw4>>Px(0(`gN#?w`tvy7veqp^Za>Yf>EAsCQy$>@UXxOm-9>uE<`eyM$_EGm3#YB zMq1Nm4*G)H(6^hsG#qpBLOScd&w7lW&$q~!TW#sOMeY=>fWwsvOE~)AbMbqBD%(+UgT%1nMm$M`tdo$;(Vbw7c&w3JM?u}X_dN$X~o7NpEJ zB~X`}r|!~`v_I8vguhs%KF(lmPr28qaVLhBEAxSrsVW9-%gvU@FTQu&3L`8}$c!!m zTB5}8=MA+e1psvrOrrMN%V6T9EK`LL>cOe>MtOp4v17A>s*V+$?34XC6~plF^A$hy zG!mT2&Vv&Hh7mc&DMv&C{|e!XCbHwW(rpOHBKQo1BnxE>2FxUQ1%A1Ips9gYS;|FajJISpfpX(c7Vt=+(`ncqp79^g11b=}#HnR&!N5%S~ zI2T}u>xn-*RG*oYDQ#?+ 
zn4oa^U$PStb518Tdm$^NrSgd1%pk6LY4S0ZSISt4F`62H>T}kizrOrx7@UciXM(Ed z?r-P2fah!?b(O&^v6B_u-Je+imot^&J6Y)G?!7i7i?T zS7{r3RUd}f+H>^!Tia~o<*T6}IA?GKK<;vzJPU5~&5LXD-3sznMAW?vu9%n@Ede(F z?R%|=Dq~qI0PvVhy+?aT%FjF#=*TsZ0U`nL(qU&kGm~$_9wI%ZVrvoHw z-l>^`_okqcxWW7zl+FlETKrhx08ufM>mlIV!R)j%vMK$4efjXM05cd37@WEy8Sswz z@y^--9!EuSzyA9LjX#>ip9r-+nCn}f=-_}YEB0#V>B&r5%r~p|&5HUL!#8LqI{6$o zmzIj`YQr6sy{!>}-EXdC$UFm2vG{c1n%ObpCO@Hzo2z`b%+|+9H3Hz%3q|=XcnYSY z*_OVHfY-{j&U$`pP?Hsq7$?>OTG+IGr1B;f4@md7lFry+X8)}$4+mqZ8@{d#$L2(} z($qU&x!MJHb?KVT68T8Qg5`)lhd4t(I+BmCuFPGOI$@=_An;U3qi3Mab~JNI9i6D` zk~m5K7H9LAA$jlnP^}AJCoS2lh0Em&wNq*PQb)-xlU7>nk!zmL6F!B^F~Bs(JAq5o z+*yEvQmXvC`rh6MZA#T76^9RA|J>b!YC$u{j`To$N~Ta)YR5CfiI=(zqz_E9Y}>GD zNb;STvkDKe;Q`vo0bkzVZ{}5>YSstzAH7phrD?^jL#ki}^v~H9T7{>Bg2tbn$vPMN zm7#g?h{HNe-Pj_=%oix*-M(jImH#pOLnIE=xykB@rQ#VgTJ=bzzg#_Xm-M4W$s5M& z@0T*JW`;07Hj5$bx%Xmjf#N9H1~J=2%*<=XYCm;JF9VK6{4rgna_i7PU=wXGL&*yR zFXXKI6>T;bFrzuUwC9=>XMq$D#(6n)>?**HCD3qZ!e<5@j|f2B&FI8*<7VQuA(!qq zvEOn5x2q{G;C!gfobT)a^=jug|7ZRS6AtXgwa)~d-1OQioCSbv(&udJ{O<0SW~3RjzD+2H zb_u#R1fZuAJJBZ=k)#xF9a^UBQDk2qosoxe@x&DS43&0EZDQ$W9BY#kv%Xs#5MtFp znrWJhHG@R$kSiCRkWNO>`fffeEnF2hFlzv?lEtO9n|K2_Yewm~o~2dBO!FI>|1;Om zc+`s^1xcLwhiq^L`mDow_4Y{{JOu|i^pZy(qG-~IZ4?)_STCd`y5yU=Sz+t&QNcZ-&1^aWC z5I4cNOE89^{A5EjXEZ3)T!X8ihNS2Oz+&WWwBX&YGn0=$sno6ZSh~k1x#QIi_pSh} zGEPU7ZGBR-+4=^v-vzBKj~ln8P4&%!7uADnD2y(u4tRvhT9vmA@bDNB{llqJJM}ut z0D!Pc(cDKi9NWtPI&~J1(geBrk$d}(TIh9JUzkvV^aNurV7`XL40E^{|s)4Zl--uIoVgN992vm5mv~hZ`loi0_bff)js973rAN@9H~!I zUC}-teo*aeA40;*lX!auyO}f!m}lSG%hL~&r4E<4k^75{eHC`_9g$d0?T+IlvxFlp zcIpgs@u}!Yb~cz#7W1WkbB?Vy=AHWNA~Z*ye;OKn+^P0|a%3Jj8YP~662Q?jZ+{XFMSo03K_XtBUNa~wNuwt}hm z?;lF;?N1W*&If4m?xdd%Jz#$9rC{sx#6;&=V!rv1pKb_cdQ`dmEsKRjtf6&NAr~l3R06S#g5EFb-%G zOVb$2aDr266v`TV?$cvFeY3h&+50qy2!Osat(x@YTP!WH4)h>Lb5eX0)#$kNm_eKG-k#Cx$Qd}nv)ZY`*r z*lUZ@H&=77e5%K2sACL9oF<=3o1CFm%^CZBeI#|EZB3bIB@UuvF+e+L=r48APka0q z=(v-pf~sz^EW+!)R(4S%uC0JFfM5K&ga3p_dhRNVtaZ8$?Ivgp?G8;{few; zhLCs(e$|x;+Lz|5s|)BLHPs#Ctz1m_2P1YH6>h|!FKE1y0=T@|l=*85N^(qdnOa4r5shpN(O<&_@5K7OvzfA0Mw19d5QM>WI4(AsqwD47@D z2Bc{rTusX`7_x6tVSD(z^fvziJR-pBbTD@jkxpVM@Ar2c@U8J$v+KgmT-`Rc_zgQL zd;M*-TG!;zf&_aGyT-QRn`{gXDSqS2LHK?pIMoX4Qs#d;2B!<1>MhJ5V1cA82iVrm z?L|Rz&8kMoOLn_9z-!ng(F85+vvunq?7bAAn~-kLfnr`-2PtspZJw3{Y_obaD4>=GpQw!-{hV#!y+g_elQ*Q))oN~KS zyvWan%Hzt8t~@h&%TaV*L)r!Dzx*tE^3BBsu3MKnUn^uwi4tJp=hf~_m(b}g1`MtO z3~2Yw2}s<&qZU_>nC8dxL=1GOmAdv_@h0H{;QVTy_G2Z82s)r8Kw^Dy>i%{^XXiKW zxcvPG)jajeT>HdG8231UtcmGxA(_GP>X6NjhZsk$z4;h$c=@d8jV++e+mDz6G`yGc zD&Xd=Ten7>kmYIgWgO1Fey=$`up-&BLE3Rk0OI{dJv3x$y98-;bShPT`sbYacY$vT z(c=m8{{HcLtFly0^P}SmUWWJ77MqWMLW@F{>OJVXRV^vV^>=xu1#=>Zj*h0un>GoE z8S3d?MOkY!N|{#O`7Vp$CSKZ12jD4)Iu{;?o9{R;SqIs=I%%^e(7g!OQurHWN$i5; zR_n`JIcBlMbmZNsS2&BVv!6e1ALG?iIKPY|=bJ0eHRQ_y1!P^n_}x}yFZ-99X+!)H z!s>1(Yl`eXzM3&WziIiR7cSzKD!u=(Ro3+vDZc%;3a7&H>r3vYgl*c%=j7IheK3(% zoiCF(_OOIOdqBSwd?v0gfjZorU+FzA1^|U`P=?DhDxQ;_ORT!}9!CYwh4uDaRGMRP zC3#=mBg4O)unj*@XFtbY<ySF2WydC^`T5qy)!OJ=Uq$DKxEg8(H+j2Zd1_?U*| z{qsLknVxCYplNjgnB4N{TAry9O9~u~7y6fSuAi;nY*lYB53PqmWXsg}^{76(3WX`* zdm52L?M_NW=z_c<_w#J7p_+)k7 zIvr8-1`VZLlk#*$SL}t(WStWJxwKfL_E9h0B*!>Ie(%Pe|NYcQhOtv^eb7svWX1+G z-DGRtGCvi`Pwsi51+z(j@k;k zA;!=6+~o41BE}CDetCOXUNAHCg4NcSUK{@8`DjyGnpge(VWsuC`Oj9hi6|t95$04~ zMoBJKbcCio>WG6V(FckY z&`+-cQg0JYXJ1+UjMzB+huH&>;RBBoH@xE?)5<1ng6M>*OyBXazxwcWdAt&4adES-F$D@CyL%zzDyx?O#ag$s<_NFUpMV`Mt|+iT?sg~`Gdv& z$b^MyGvoA)glN4R_`kOz=h;pBdm^OrT zhc>MYdA#NO5eX2O|7Q|>9_Fp1j((Ub(_Z?hX7^(+-)H*PnT=Mr80*M}O}lkVz|6mm;iI&6=E&k2E_pyS}cBg+r#0T1{?@ zn6rtBKM1qOj=vZm3f^gL_?9nQ09p<{yt3if{QU`aLhR;TQ!Nb%PBhGm{H70NC#C7) 
z4!M363++TTa}y2Y5?HZ%s*vh{N-AQqoV6U%e#Mqw)U#g)weB{XV6RYfbvtLs*$xcl zq?k}6S*t@u9i3lN3{w!4>hOnX^{W)Go+deD89_ymwcn>S(3JFHah@Y@K|G^PHvAtj z6+OT;_Rts+_0P{YWJ9hEXR6q(UuYzO1(+7Y+^!Jx%fW^L6lNGokfvG=c|SE+ZagPA zO(e@D-d4zSVKnDMpcQ3LRKV9~+I`HMd49*|;P|6kxD*X|1Mslzfst^f+~YmPHFAj-qi3WjGJ%tWziyNgL$^afAIAE5PxW0AS%Kau<{>nu*a+q2=s z{*76SxT#Air`*S-HD75Hx?DL~D3GaL7+KiU1B!u{@FY3Llo5 zk`mn1ooCj_O3heRcwPZ)E56AVI3Gn$t-_ZX1}4DC2mR5qNYF~*P7$q>w~WL!ohH^# z&VMhd{{dp#lJ9^F=X{gnKGsKU^k;GS!kLGx&V4Xw4bh$?qNERICtms= z;u7+4PgwU1{rW=8356yO)@&C|Wv9^}kZZhTm;Xf`u(xl^)TkrYh&Fd)^_NQI;mM^Z z7zyt13|;dDh*q`Snjg`olF&``f@A7|izOK?uym!$lK|z-y z5kSez0-{|sPzC+KJRD}s5L6em8B>a^z80Kp$=A$!q%DC>%;%3|w!vdh{+*pRqW!I0 z6*l)2f5GKnm--Jhs6*-G+g-WImzmO6CB|Qy7vV8|YVu)T9+kTMWuo67>7yXC8&_jN zLCeF*29p(}F=WXc7iGA!%k>`C{K6!)SE;6?p zii*94>-H?owQ3s$MFo0|UkB|@QP3Jv`f`Sz?*p1E7HD+RS2Dx={7mc4WaCw5M_Wa{ zmv+&gK!OkiJ?EHbc~JBHJPnFFpE zwG&FGe_1#>at91*;%o9(o=M1z@~azXBFae)JvG*56z4A!q{_bBSBQHqGf3=N3)K#P zDt;R2+uyHs3R5_y=nla+P)YC6w2f#m9je z#}dDl(p243&7}!Q=5jW^tOEEkMsLf_)*9pF<3L92@_dy=sq-OV-M(-mSJp@Vd0L^Q zJ#g9`7`@pt);ODp1&3OUoh$J3gprFkI^99wQD3ub0k}WW+N+BnOufpMvkPxECn|fA zU>HN75oQ8kAbSmXm<^RaldgPDjOxw1<7dGl=&$cg30fHvkzAv!tzuYeNwOWze_?pO z^I{xW5@9q9IPi28noeko_TuNwo!P)G&6p#4b>#>h1YRrs~X>e#avhVef#6NyUE zwJ#M*uyfR0TG)e1!5WqOmoH8 zZo=h#%@5uowWn%nB#A29Ndz95JH5^rsn7@gj)%%Pb}5sDkB!Fv#J2uNX;mx!bV1)Ed-qRs=CToEr+U!6lqd z`=Z&y&pXf+SwjRFl*v1e@p0B8VMJ^X5405{%T*dq zr!whkg{OlbP$V5bhV4;lbXAV8V0nq3eBf;Xm$Hs+6|koiJ6)_;=&{TK>hCPer$tSm z*g6F4(M)anfS02KlDnyguT#n>QKIr@C1)7%&9%mP)LeJzyT@RWIMg62?@dm2 zb^}O3AFiCOYB_I~OvIn&My@V7dF=w(mkhP7h|bC8EiCmPKT|aqe#?cEd+95(wHIOS zg9o3b;5lqi{00v(!TIU9tGD@80jlB&H1QLf<3?O6b$BpWLAYyEyGy~SqI1{logq`c z2Xi(@2*HJbhMIF|taI1AMP_E^iJ3o1UDlM5`))8HY_}))n^!yE!~&BJqL~ds@PQix zT}&{rszk%d3Epks;spqrz?^m;5h$>#xH50JV_7Q*DCA86R-*=gXT?Agzn22{P3o?& z#~@5=vwj31bUeXMLEofgr+4}55&%yfKO3W>&&|v6>Y3kUpgqrs7H^_L9rrX-E{gH1 zmJpz%dwDZdjc8CSUA4b7FdMo|Ne^W(t5IEGZcMl0nw>S^A`pi8wCbN?0X_@!wZZ|i zAN+2mucYgSW?nD6X`F!iN;MX!F38J%D9IfbG+j!xC!(ws?CW@nClJsSk6M6S$Jl07 z7R^U9blX{^p;;+?<*8!JF$U1cR6?dfYFw!n#3wx1r+osgx7c*DSS(8Qxpge$LBv3_BQ}&@yUHIo9+O1Jj`+#-%%`P zniN~T8>YiK-;>l_Qc>aP(brxbpT9$6IO`^xITF3`0)eQ!vdQgBv>GmxKNd5HXJWK` zbj-XlqxsZCj9u-n(Ul9I4ijF7;B&;59>C!R6a$qCIxN)<4LoTtHq|0*W8HRz*RLUU& z*k5+0C)|mslxBsHxfWg9TFyR7;9PTtZa;8X_E{gb4#g@z6V~j{7Y*vZW|lByKwx!=ro>u9Gf(n#GAI1LIVFbW}e9_XznV&Laf^& zCijoiw%0tEa_do`PrM+giII1HkZQ@noe3Lzo=4)g+Nvbj*QTclzp0b$JMQgR3@^dt zuck;u7oKLcp;So z(6V+~3Xe>`%cVt7KZZe)*wKLw@-#`Y?^v0-W&ip1v&~1oZhsjv`EnfCcKf+`Fyk3d zyPB9OlYcg5W#k4!0}$d)GPtcn z3c|XZBGa8~QY)k3n#!ypo&y%Fb8jW`K2>Xlh8xMgDD<`-fdUtZ@G19}!IB2x6ar)_P%GAA4$}aE-DW6 z0;*T~Jr8Y%CEe+6%pV{>G-EV1z}6tJ53vqC=oc;>T2;%&@|vs49sXtdB?~aDL0S9! 
z-(y_D{gg_^VxZ}>fC>(SD^`?7^Vk|HXR(|`2Y_+$c%Ss%ecMpL4>lxa>b8Q8G=&$c zeXB!4&3i1eIJ&$buCIrj)8iw8rJrWBE5MfkhZV;R`ny0&ch*u>UfzG{07_+2?iFxl zN_ycb(vr|al-SHQdG8CBwK5d6HeEFWh5j|QPyBtW~TST)Aq<0(-Z%+!qU zl6p7%1N%L#f+-?u#x23?4Bt=|XuJ~mJHLs69Atw(&5hyzWNf;2Cc3X2!h8V80Rpax zUcGru80Ah-KnDg$dqeu9BT7UpTip}|i9L-CDjoPa$E>+Kn0C=gK%F!F2u^fW`ceo!MKt6%ua@M ztsB${m*QwgOEg5&2|OKG&WA?U08X;!E?f&m`l+W>F&+dP_BurH=RFL+3>+xnCuRDn ze*l)}AwaSfRG&Z_0jE1p!6QB)=5%X>>;EozqE1`p;pV;&H4V!%e=Fd=EZefLHFRk` zxgi?K3$)u7E{B?5NK{Dj-nT)MbrULib30X2ZPFmHl?i7&7H@{T9{X^Tgl>|z86A$B u3j7euh^?6W@azl9iTvBv+nC->#Hp7KrLpP|079_p0s-Twzk*;^w3 literal 0 HcmV?d00001 diff --git a/docs/source/index.rst b/docs/source/index.rst index b4528cc46af77..291d0af171eb4 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -35,6 +35,7 @@ Get started using the :ref:`quickstart` or by reading about the :ref:`key concep recipes gateway/index llms/prompt-engineering + llms/llm-evaluate/index plugins auth/index cli diff --git a/docs/source/llms/llm-evaluate/index.rst b/docs/source/llms/llm-evaluate/index.rst new file mode 100644 index 0000000000000..2d90df8399dcc --- /dev/null +++ b/docs/source/llms/llm-evaluate/index.rst @@ -0,0 +1,538 @@ +.. _llm-eval: + +MLflow LLM Evaluate +==================================== + +With the emerging of ChatGPT, LLMs have shown its power of text generation in various fields, such as +question answering, translating and text summarization. Evaluating LLMs' performance is slightly different +from traditional ML models, as very often there is no single ground truth to compare against. +MLflow provides an API :py:func:`mlflow.evaluate()` to help evaluate your LLMs. + +MLflow's LLM evaluation functionality consists of 3 main components: + +1. **A model to evaluate**: it can be an MLflow ``pyfunc`` model, a URI pointing to one registered + MLflow model, or any python callable that represents your model, e.g, a HuggingFace text summarization pipeline. +2. **Metrics**: the metrics to compute, LLM evaluate will use LLM metrics. +3. **Evaluation data**: the data your model is evaluated at, it can be a pandas Dataframe, a python list, a + numpy array or an :py:func:`mlflow.data.dataset.Dataset` instance. + + +Quickstart +========== + +Below is a simple example that gives an quick overview of how MLflow LLM evaluation works. The example builds +a simple question-answering model by wrapping "openai/gpt-4" with custom prompt. You can paste it to +your IPython or local editor and execute it, and install missing dependencies as prompted. Running the code +requires OpenAI API key, if you don't have an OpenAI key, you can set it up [here](https://platform.openai.com/account/api-keys). + +.. code-block:: shell + + export OPENAI_API_KEY='your-api-key-here' + +.. code-block:: python + + import mlflow + import openai + import os + import pandas as pd + from getpass import getpass + + eval_data = pd.DataFrame( + { + "inputs": [ + "What is MLflow?", + "What is Spark?", + ], + "ground_truth": [ + "MLflow is an open-source platform for managing the end-to-end machine learning (ML) " + "lifecycle. It was developed by Databricks, a company that specializes in big data and " + "machine learning solutions. MLflow is designed to address the challenges that data " + "scientists and machine learning engineers face when developing, training, and deploying " + "machine learning models.", + "Apache Spark is an open-source, distributed computing system designed for big data " + "processing and analytics. 
It was developed in response to limitations of the Hadoop " + "MapReduce computing model, offering improvements in speed and ease of use. Spark " + "provides libraries for various tasks such as data ingestion, processing, and analysis " + "through its components like Spark SQL for structured data, Spark Streaming for " + "real-time data processing, and MLlib for machine learning tasks", + ], + } + ) + + with mlflow.start_run() as run: + system_prompt = "Answer the following question in two sentences" + # Wrap "gpt-4" as an MLflow model. + logged_model_info = mlflow.openai.log_model( + model="gpt-4", + task=openai.ChatCompletion, + artifact_path="model", + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": "{question}"}, + ], + ) + + # Use predefined question-answering metrics to evaluate our model. + results = mlflow.evaluate( + logged_model_info.model_uri, + eval_data, + targets="ground_truth", + model_type="question-answering", + ) + print(f"See aggregated evaluation results below: \n{results.metrics}") + + # Evaluation result for each data record is available in `results.tables`. + eval_table = results.tables["eval_results_table"] + print(f"See evaluation table below: \n{eval_table}") + + +LLM Evaluation Metrics +======================= + +There are two types of LLM evaluation metrics in MLflow: + +1. Metrics relying on SaaS model (e.g., OpenAI) for scoring, e.g., :py:func:`mlflow.metrics.answer_relevance`. These + metrics are created via :py:func:`mlflow.metrics.make_genai_metric` method. For each data record, these metrics under the hood sends + one prompt consisting of the following information to the SaaS model, and extract the score from model response: + + * Metrics definition. + * Metrics grading criteria. + * Reference examples. + * Input data/context. + * Model output. + * [optional] Ground truth. + + More details of how these fields are set can be found in the section "Create your Custom LLM-evaluation Metrics". + +2. Function-based per-row metrics. These metrics calculate a score for each data record (row in terms of Pandas/Spark dataframe), + based on certain functions, like Rouge (:py:func:`mlflow.metrics.rougeL`) or Flesch Kincaid (:py:func:`mlflow.metrics.flesch_kincaid_grade_level`). + These metrics are similar to traditional metrics. + + +Select Metrics to Evaluate +-------------------------- + +MLflow LLM evaluation includes default collections of metrics for pre-selected tasks, e.g, "question-answering". Depending on the +type of LLM use case that you are evaluating, these pre-defined collections can greatly simplify the process of running evaluations. 
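As a quick illustration of how both metric categories fit into a single evaluation, the minimal sketch below reuses
the ``eval_data`` DataFrame and ``logged_model_info`` from the quickstart above (and, like the quickstart, assumes an
OpenAI key is configured for the LLM judge); it mixes one LLM-as-judge metric with two function-based per-row metrics:

.. code-block:: python

    # Mix the two metric categories in one evaluation run:
    # - answer_similarity() is an LLM-as-judge metric, so it uses the
    #   "targets" column as ground truth and calls the judge model.
    # - rougeL() and flesch_kincaid_grade_level() are function-based per-row
    #   metrics computed locally (ROUGE-L against the targets, readability
    #   from the model outputs).
    results = mlflow.evaluate(
        logged_model_info.model_uri,
        eval_data,
        targets="ground_truth",
        extra_metrics=[
            mlflow.metrics.answer_similarity(),
            mlflow.metrics.rougeL(),
            mlflow.metrics.flesch_kincaid_grade_level(),
        ],
    )
    print(results.metrics)

Note that ``rougeL`` and ``flesch_kincaid_grade_level`` rely on the extra packages listed in the footnotes below.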
+The default metrics for given model types are shown below: + +* **question-answering**: ``model_type="question-answering"``: + + * exact-match + * `perplexity `_ :sup:`1` + * `toxicity `_ :sup:`1` + * `ari_grade_level `_ :sup:`2` + * `flesch_kincaid_grade_level `_ :sup:`2` + +* **text-summarization**: ``model_type="text-summarization"``: + + * `ROUGE `_ :sup:`3` + * `perplexity `_ :sup:`1` + * `toxicity `_ :sup:`1` + * `ari_grade_level `_ :sup:`2` + * `flesch_kincaid_grade_level `_ :sup:`2` + +* **text models**: ``model_type="text"``: + + * `perplexity `_ :sup:`1` + * `toxicity `_ :sup:`1` + * `ari_grade_level `_ :sup:`2` + * `flesch_kincaid_grade_level `_ :sup:`2` + + +:sup:`1` Requires package `evaluate `_, `pytorch `_, and +`transformers `_ + +:sup:`2` Requires package `textstat `_ + +:sup:`3` Requires package `evaluate `_, `nltk `_, and +`rouge-score `_ + +However, using the pre-defined metrics associated with a given model type is not the only way to generate scoring metrics +for LLM evaluation in MLFlow. MLflow provides two ways for selecting metrics to evluate your LLM: + +1. Specify the ``model_type`` argument in :py:func:`mlflow.evaluate` + + * Each predefined model type comes with a standard set of metrics that are available for relevant evaluation of a model type. + * The defaults are suitable if your model falls in one of the predefined categories (e.g., ``question-answering``). + + An example of using the predefined metrics for a given ``model_type`` is shown below: + + .. code-block:: python + + results = mlflow.evaluate( + model, + eval_data, + targets="ground_truth", + model_type="question-answering", + ) + +2. Specify a custom list of metrics by explicitly referencing a metric calculation function. + + * To add additional metrics to the default collection from part 1 above, add the function names to the ``extra_metrics`` argument. + * To diable default metric calculation and only calculate explicit metrics, remove the ``model_type`` argument and define the desired metrics. + + An example of disabling the default metrics and explicitly declaring a subset of metrics to calculate is shown below: + + .. code-block:: python + + results = mlflow.evaluate( + model, + eval_data, + targets="ground_truth", + extra_metrics=[mlflow.metrics.toxicity(), mlflow.metrics.latency()], + ) + + +The full reference for supported evaluation metrics can be found `here <../python_api/mlflow.html#mlflow.evaluate>`_. + +Metrics with LLM as the Judge +--------------------------------------------- + +MLflow offers a few pre-canned metrics which uses LLM as the judge. Despite the difference under the hood, the usage +is the same - put these metrics in the ``extra_metrics`` argument in ``mlflow.evaluate()``. Here is the list of pre-canned +metrics: + +* :py:func:`mlflow.metrics.answer_similarity`: Evaluate the similarity between ground truth and your LLM outputs. +* :py:func:`mlflow.metrics.answer_correctness`: Evaluate the correctness level of your LLM outputs based on given context + and ground truth. +* :py:func:`mlflow.metrics.faithfulness`: Evaluate the faithfulness of your LLM outputs. + + +Create your Custom LLM-evaluation Metrics +--------------------------------------------- + +Create LLM-as-judge Evaluation Metrics (Category 1) +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +You can also create your own Saas LLM evaluation metrics with MLflow API :py:func:`mlflow.metrics.make_genai_metric`, which +needs the following information: + +* ``name``: the name of your custom metric. 
+* ``definition``: describe what's the metric doing. +* ``grading_prompt``: describe the scoring critieria. +* ``examples``: a few input/output examples with score, they are used as a reference for LLM judge. +* ``model``: the identifier of LLM judge. +* ``parameters``: the extra parameters to send to LLM judge, e.g., ``temperature`` for ``"openai:/gpt-3.5-turbo-16k"``. +* ``aggregations``: aggregation strategy for the metrics. +* ``greater_is_better``: indicates if a higher score means your model is better. + +Under the hood, ``definition``, ``grading_prompt``, ``examples`` together with evaluation data and model output will be +composed into a long prompt and sent to LLM. If you are familiar with the concept of prompt engineering, +SaaS LLM evaluation metric is basically trying to compose a "right" prompt containing instructions, data and model +output so that LLM, e.g., GPT4 can output the information we want. + +Now let's create a custom GenAI metrics called "professionalism", which measures how professional our model output is. + +Let's first create a few examples with scores, these will be the reference samples LLM judge uses. To create such examples, +we will use :py:func:`mlflow.metrics.EvaluationExample` class, which has 4 fields: + +* input: input text. +* output: output text. +* score: the score for output in the context of input. +* justification: why do we give the `score` for the data. + +.. code-block:: python + + professionalism_example_score_2 = mlflow.metrics.EvaluationExample( + input="What is MLflow?", + output=( + "MLflow is like your friendly neighborhood toolkit for managing your machine learning projects. It helps " + "you track experiments, package your code and models, and collaborate with your team, making the whole ML " + "workflow smoother. It's like your Swiss Army knife for machine learning!" + ), + score=2, + justification=( + "The response is written in a casual tone. It uses contractions, filler words such as 'like', and " + "exclamation points, which make it sound less professional. " + ), + ) + professionalism_example_score_4 = mlflow.metrics.EvaluationExample( + input="What is MLflow?", + output=( + "MLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was " + "developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is " + "designed to address the challenges that data scientists and machine learning engineers face when " + "developing, training, and deploying machine learning models.", + ), + score=4, + justification=("The response is written in a formal language and a neutral tone. "), + ) + +Now let's define the ``professionalism`` metric, you will see how each field is set up. + +.. code-block:: python + + professionalism = mlflow.metrics.make_genai_metric( + name="professionalism", + definition=( + "Professionalism refers to the use of a formal, respectful, and appropriate style of communication that is " + "tailored to the context and audience. It often involves avoiding overly casual language, slang, or " + "colloquialisms, and instead using clear, concise, and respectful language." + ), + grading_prompt=( + "Professionalism: If the answer is written using a professional tone, below are the details for different scores: " + "- Score 0: Language is extremely casual, informal, and may include slang or colloquialisms. Not suitable for " + "professional contexts." + "- Score 1: Language is casual but generally respectful and avoids strong informality or slang. 
Acceptable in "
            "some informal professional settings."
            "- Score 2: Language is overall formal but still has casual words/phrases. Borderline for professional contexts."
            "- Score 3: Language is balanced and avoids extreme informality or formality. Suitable for most professional contexts. "
            "- Score 4: Language is noticeably formal, respectful, and avoids casual elements. Appropriate for formal "
            "business or academic settings. "
        ),
        examples=[professionalism_example_score_2, professionalism_example_score_4],
        model="openai:/gpt-3.5-turbo-16k",
        parameters={"temperature": 0.0},
        aggregations=["mean", "variance"],
        greater_is_better=True,
    )

..
    TODO(prithvi): add best practice for creating GenAI metrics.


Create Per-row LLM Evaluation Metrics (Category 2)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

This is very similar to creating a custom traditional metric, with the exception that your scoring function returns a
:py:func:`mlflow.metrics.MetricValue` instance. Basically you need to:

1. Implement an ``eval_fn`` to define your scoring logic; it must take in 3 args ``predictions``, ``targets`` and ``metrics``.
   ``eval_fn`` must return a :py:func:`mlflow.metrics.MetricValue` instance.
2. Pass ``eval_fn`` and other arguments to the ``mlflow.metrics.make_metric`` API to create the metric.

The following code creates a dummy per-row metric called ``"over_10_chars"``: if the model output is longer than 10
characters, the score is 1; otherwise it is 0.

.. code-block:: python

    from mlflow.metrics import MetricValue, make_metric

    # `standard_aggregations` computes the standard aggregate statistics over the
    # per-row scores; this import path is an assumption.
    from mlflow.metrics.metric_definitions import standard_aggregations


    def eval_fn(predictions, targets, metrics):
        scores = []
        for i in range(len(predictions)):
            if len(predictions[i]) > 10:
                scores.append(1)
            else:
                scores.append(0)
        return MetricValue(
            scores=scores,
            aggregate_results=standard_aggregations(scores),
        )


    # Create an EvaluationMetric object.
    passing_code_metric = make_metric(
        eval_fn=eval_fn, greater_is_better=False, name="over_10_chars"
    )


Prepare Your LLM for Evaluating
=====================================

In order to evaluate your LLM with ``mlflow.evaluate()``, your LLM has to be one of the following types:

1. A :py:func:`mlflow.pyfunc.PyFuncModel` instance or a URI pointing to a logged ``mlflow.pyfunc.PyFuncModel`` model.
   In general we call that an MLflow model.
2. A python function that takes in string inputs and outputs a single string. Your callable must match the signature of
   :py:func:`mlflow.pyfunc.PyFuncModel.predict` (without the `params` argument); briefly, it should:

   * Take ``data`` as the only argument, which can be a ``pandas.DataFrame``, ``numpy.ndarray``, python list, dictionary or scipy matrix.
   * Return one of ``pandas.DataFrame``, ``pandas.Series``, ``numpy.ndarray`` or list.
3. Set ``model=None``, and put model outputs in ``data``. Only applicable when the data is a Pandas dataframe.

Evaluating with an MLflow Model
---------------------------------

For detailed instructions on how to convert your model into a ``mlflow.pyfunc.PyFuncModel`` instance, please read
`this doc `_. In short,
to evaluate your model as an MLflow model, we recommend following the steps below:

1. Convert your LLM to an MLflow model and log it to the MLflow server with ``log_model``. Each flavor (``openai``, ``pytorch``, ...)
   has its own ``log_model`` API, e.g., :py:func:`mlflow.openai.log_model()`:

   .. code-block:: python

       with mlflow.start_run():
           system_prompt = "Answer the following question in two sentences"
           # Wrap "gpt-3.5-turbo" as an MLflow model.
+ logged_model_info = mlflow.openai.log_model( + model="gpt-3.5-turbo", + task=openai.ChatCompletion, + artifact_path="model", + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": "{question}"}, + ], + ) +2. Use the URI of logged model as the model instance in ``mlflow.evaluate()``: + + .. code-block:: python + + results = mlflow.evaluate( + logged_model_info.model_uri, + eval_data, + targets="ground_truth", + model_type="question-answering", + ) + +Evaluating with a Custom Function +---------------------------------- + +As of MLflow 2.8.0, :py:func:`mlflow.evaluate()` supports evaluating a python function without requiring +logging the model to MLflow. This is useful when you don't want to log the model and just want to evaluate +it. The following example uses :py:func:`mlflow.evaluate()` to evaluate a function. You also need to set +up OpenAI authentication to run the code below. + +.. code-block:: python + + eval_data = pd.DataFrame( + { + "inputs": [ + "What is MLflow?", + "What is Spark?", + ], + "ground_truth": [ + "MLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. It was developed by Databricks, a company that specializes in big data and machine learning solutions. MLflow is designed to address the challenges that data scientists and machine learning engineers face when developing, training, and deploying machine learning models.", + "Apache Spark is an open-source, distributed computing system designed for big data processing and analytics. It was developed in response to limitations of the Hadoop MapReduce computing model, offering improvements in speed and ease of use. Spark provides libraries for various tasks such as data ingestion, processing, and analysis through its components like Spark SQL for structured data, Spark Streaming for real-time data processing, and MLlib for machine learning tasks", + ], + } + ) + + + def openai_qa(inputs): + answers = [] + system_prompt = "Please answer the following question in formal language." + for index, row in inputs.iterrows(): + completion = openai.ChatCompletion.create( + model="gpt-3.5-turbo", + messages=[ + {"role": "system", "content": system_prompt}, + {"role": "user", "content": "{row}"}, + ], + ) + answers.append(completion.choices[0].message.content) + + return answers + + + with mlflow.start_run() as run: + results = mlflow.evaluate( + openai_qa, + eval_data, + model_type="question-answering", + ) + +Evaluating with a Static Dataset +---------------------------------- + +For MLflow >= 2.8.0, :py:func:`mlflow.evaluate()` supports evaluating a static dataset without specifying a model. +This is useful when you save the model output to a column in a Pandas DataFrame or an MLflow PandasDataset, and +want to evaluate the static dataset without re-running the model. + +If you are using a Pandas DataFrame, you must specify the column name that contains the model output using the +top-level ``predictions`` parameter in :py:func:`mlflow.evaluate()`: + + +.. code-block:: python + + import mlflow + import pandas as pd + + eval_data = pd.DataFrame( + { + "inputs": [ + "What is MLflow?", + "What is Spark?", + ], + "ground_truth": [ + "MLflow is an open-source platform for managing the end-to-end machine learning (ML) lifecycle. " + "It was developed by Databricks, a company that specializes in big data and machine learning solutions. 
" + "MLflow is designed to address the challenges that data scientists and machine learning engineers " + "face when developing, training, and deploying machine learning models.", + "Apache Spark is an open-source, distributed computing system designed for big data processing and " + "analytics. It was developed in response to limitations of the Hadoop MapReduce computing model, " + "offering improvements in speed and ease of use. Spark provides libraries for various tasks such as " + "data ingestion, processing, and analysis through its components like Spark SQL for structured data, " + "Spark Streaming for real-time data processing, and MLlib for machine learning tasks", + ], + "predictions": [ + "MLflow is an open-source platform that provides handy tools to manage Machine Learning workflow " + "lifecycle in a simple way", + "Spark is a popular open-source distributed computing system designed for big data processing and analytics.", + ], + } + ) + + with mlflow.start_run() as run: + results = mlflow.evaluate( + data=eval_data, + targets="ground_truth", + predictions="predictions", + extra_metrics=[mlflow.metrics.answer_similarity()], + evaluators="default", + ) + print(f"See aggregated evaluation results below: \n{results.metrics}") + + eval_table = results.tables["eval_results_table"] + print(f"See evaluation table below: \n{eval_table}") + + +View Evaluation Results +======================== + +View Evaluation Results via Code +----------------------------------- + +``mlflow.evaluate()`` returns the evaluation results as an :py:func:`mlflow.models.EvaluationResult` instace. +To see the score on selected metrics, you can check: + +* ``metrics``: stores the aggregated results, like average/variance across the evaluation dataset. Let's take a second + pass on the code example above and focus on printing out the aggregated results. + + .. code-block:: python + + with mlflow.start_run() as run: + results = mlflow.evaluate( + data=eval_data, + targets="ground_truth", + predictions="predictions", + extra_metrics=[mlflow.metrics.answer_similarity()], + evaluators="default", + ) + print(f"See aggregated evaluation results below: \n{results.metrics}") + +* ``tables["eval_results_table"]``: stores the per-row evaluation results. + + .. code-block:: python + + with mlflow.start_run() as run: + results = mlflow.evaluate( + data=eval_data, + targets="ground_truth", + predictions="predictions", + extra_metrics=[mlflow.metrics.answer_similarity()], + evaluators="default", + ) + print( + f"See per-data evaluation results below: \n{results.tables['eval_results_table']}" + ) + + +View Evaluation Results via MLflow UI +-------------------------------------- + +Your evaluation result is automatically logged into MLflow server, so you can view your evaluation results directly from the +MLflow UI. To view the evaluation results on MLflow UI, please follow the steps below: + +1. Go to the experiment view of your MLflow experiment. +2. Select the "Evaluation" tab. +3. Select the runs you want to check evaluation results. +4. Select the metrics from the dropdown menu on the right side. + +Please see the screenshot below for clarity: + + +.. 
figure:: ../../_static/images/llm_evaluate_experiment_view.png + :width: 1024px + :align: center + :alt: Demo UI of MLflow evaluate \ No newline at end of file From ab04ff5689ac0ef95a0a7bbcc42827fb18601238 Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Mon, 23 Oct 2023 20:46:43 -0700 Subject: [PATCH 061/101] Fix lightning autolog code example (#9964) Signed-off-by: chenmoneygithub --- mlflow/pytorch/__init__.py | 64 ++++++++------------------ requirements/doc-requirements.txt | 3 ++ requirements/extra-ml-requirements.txt | 2 +- 3 files changed, 22 insertions(+), 47 deletions(-) diff --git a/mlflow/pytorch/__init__.py b/mlflow/pytorch/__init__.py index bb24b7f680977..11df0adcce244 100644 --- a/mlflow/pytorch/__init__.py +++ b/mlflow/pytorch/__init__.py @@ -947,35 +947,28 @@ def autolog( The registered model is created if it does not already exist. :param extra_tags: A dictionary of extra tags to set on each managed run created by autologging. - .. code-block:: python + .. testcode:: python :caption: Example import os - import pytorch_lightning as pl + import lightning as L import torch from torch.nn import functional as F - from torch.utils.data import DataLoader + from torch.utils.data import DataLoader, Subset + from torchmetrics import Accuracy from torchvision import transforms from torchvision.datasets import MNIST - try: - from torchmetrics.functional import accuracy - except ImportError: - from pytorch_lightning.metrics.functional import accuracy - import mlflow.pytorch from mlflow import MlflowClient - # For brevity, here is the simplest most minimal example with just a training - # loop step, (no validation, no testing). It illustrates how you can use MLflow - # to auto log parameters, metrics, and models. - - class MNISTModel(pl.LightningModule): + class MNISTModel(L.LightningModule): def __init__(self): super().__init__() self.l1 = torch.nn.Linear(28 * 28, 10) + self.accuracy = Accuracy("multiclass", num_classes=10) def forward(self, x): return torch.relu(self.l1(x.view(x.size(0), -1))) @@ -985,9 +978,9 @@ def training_step(self, batch, batch_nb): logits = self(x) loss = F.cross_entropy(logits, y) pred = logits.argmax(dim=1) - acc = accuracy(pred, y) + acc = self.accuracy(pred, y) - # Use the current of PyTorch logger + # PyTorch `self.log` will be automatically captured by MLflow. self.log("train_loss", loss, on_epoch=True) self.log("acc", acc, on_epoch=True) return loss @@ -1006,51 +999,30 @@ def print_auto_logged_info(r): print(f"tags: {tags}") - # Initialize our model + # Initialize our model. mnist_model = MNISTModel() - # Initialize DataLoader from MNIST Dataset + # Load MNIST dataset. train_ds = MNIST( os.getcwd(), train=True, download=True, transform=transforms.ToTensor() ) - train_loader = DataLoader(train_ds, batch_size=32) + # Only take a subset of the data for faster training. + indices = torch.arange(32) + train_ds = Subset(train_ds, indices) + train_loader = DataLoader(train_ds, batch_size=8) - # Initialize a trainer - trainer = pl.Trainer(max_epochs=20, progress_bar_refresh_rate=20) + # Initialize a trainer. + trainer = L.Trainer(max_epochs=3) # Auto log all MLflow entities mlflow.pytorch.autolog() - # Train the model + # Train the model. with mlflow.start_run() as run: trainer.fit(mnist_model, train_loader) - # fetch the auto logged parameters and metrics + # Fetch the auto logged parameters and metrics. print_auto_logged_info(mlflow.get_run(run_id=run.info.run_id)) - - .. 
code-block:: text - :caption: Output - - run_id: 42caa17b60cb489c8083900fb52506a7 - artifacts: ['model/MLmodel', 'model/conda.yaml', 'model/data'] - params: {'betas': '(0.9, 0.999)', - 'weight_decay': '0', - 'epochs': '20', - 'eps': '1e-08', - 'lr': '0.02', - 'optimizer_name': 'Adam', ' - amsgrad': 'False'} - metrics: {'acc_step': 0.0, - 'train_loss_epoch': 1.0917967557907104, - 'train_loss_step': 1.0794280767440796, - 'train_loss': 1.0794280767440796, - 'acc_epoch': 0.0033333334140479565, - 'acc': 0.0} - tags: {'Mode': 'training'} - - .. figure:: ../_static/images/pytorch_lightening_autolog.png - - PyTorch autologged MLflow entities """ try: import pytorch_lightning as pl diff --git a/requirements/doc-requirements.txt b/requirements/doc-requirements.txt index ea7c429684c42..648d5c98a1fc4 100644 --- a/requirements/doc-requirements.txt +++ b/requirements/doc-requirements.txt @@ -11,3 +11,6 @@ tensorflow-cpu<=2.12.0 pyspark datasets keras-core +torch>=1.11.0 +torchvision>=0.12.0 +lightning>=1.8.1 diff --git a/requirements/extra-ml-requirements.txt b/requirements/extra-ml-requirements.txt index a76cca72f0368..1b362e1689756 100644 --- a/requirements/extra-ml-requirements.txt +++ b/requirements/extra-ml-requirements.txt @@ -13,7 +13,7 @@ tensorflow-cpu>=2.8.0 # Required by mlflow.pytorch torch>=1.11.0 torchvision>=0.12.0 -pytorch_lightning>=1.5.10 +lightning>=1.8.1 # Required by mlflow.xgboost xgboost>=0.82 # Required by mlflow.lightgbm From dce4265c480a0bccc1821cfbb29c163cfa42cc6d Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 13:19:07 +0900 Subject: [PATCH 062/101] Fix mleap test failures (#10085) Signed-off-by: harupy --- tests/mleap/test_mleap_model_export.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/mleap/test_mleap_model_export.py b/tests/mleap/test_mleap_model_export.py index 75934fc9457ea..a410bad38e898 100644 --- a/tests/mleap/test_mleap_model_export.py +++ b/tests/mleap/test_mleap_model_export.py @@ -44,13 +44,13 @@ def get_mleap_jars(): @pytest.fixture(scope="module") -def spark_context(): +def spark(): conf = pyspark.SparkConf() conf.set(key="spark.jars.packages", value=get_mleap_jars()) # Exclude `net.sourceforge.f2j` to avoid `java.io.FileNotFoundException` conf.set(key="spark.jars.excludes", value="net.sourceforge.f2j:arpack_combined_all") with get_spark_session(conf) as spark_session: - yield spark_session.sparkContext + yield spark_session @pytest.mark.skipif( From d7cbf86dca32387571151197210757b0e75848e9 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 13:39:08 +0900 Subject: [PATCH 063/101] Disable mleap tests (#10091) Signed-off-by: harupy <17039389+harupy@users.noreply.github.com> --- dev/set_matrix.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev/set_matrix.py b/dev/set_matrix.py index 4cc2a604eeebd..837e42ab43fc9 100644 --- a/dev/set_matrix.py +++ b/dev/set_matrix.py @@ -451,7 +451,7 @@ def main(args): matrix = generate_matrix(args) is_matrix_empty = len(matrix) == 0 matrix = sorted(matrix, key=lambda x: (x.name, x.category, x.version)) - matrix = [x for x in matrix if x.flavor != "gluon"] + matrix = [x for x in matrix if x.flavor not in ("gluon", "mleap")] matrix = {"include": matrix, "job_name": [x.job_name for x in matrix]} print(divider("Matrix")) From 6a605fdf5e42e50d1ac336a45041e6fe976c93f4 Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Mon, 23 Oct 2023 22:11:09 -0700 Subject: [PATCH 064/101] Add PyTorch index to circleci config for faster installation (#10090) 
Signed-off-by: chenmoneygithub --- .circleci/config.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index c9e7b4a488197..10eae6e9bdf24 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -38,6 +38,8 @@ jobs: command: | pip --version pip install --progress-bar off -r requirements/doc-requirements.txt pytest pytest-cov plotly .[gateway] + environment: + PIP_EXTRA_INDEX_URL: https://download.pytorch.org/whl/cpu - run: name: Build documentation working_directory: docs From 299c229e874b68fcb65928b2e690cb31a10dc241 Mon Sep 17 00:00:00 2001 From: Daniel Lok Date: Tue, 24 Oct 2023 14:47:32 +0800 Subject: [PATCH 065/101] Add a more helpful error message when trying to save unserializable langchain models (#10026) Signed-off-by: Daniel Lok --- mlflow/langchain/__init__.py | 10 +++++-- .../langchain/test_langchain_model_export.py | 28 +++++++++++++++++++ 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/mlflow/langchain/__init__.py b/mlflow/langchain/__init__.py index e68817cabe87b..76c234d957a83 100644 --- a/mlflow/langchain/__init__.py +++ b/mlflow/langchain/__init__.py @@ -571,8 +571,14 @@ def _save_model(model, path, loader_fn, persist_dir): if model.tools: tools_data_path = os.path.join(path, _TOOLS_DATA_FILE_NAME) - with open(tools_data_path, "wb") as f: - cloudpickle.dump(model.tools, f) + try: + with open(tools_data_path, "wb") as f: + cloudpickle.dump(model.tools, f) + except Exception as e: + raise mlflow.MlflowException( + "Error when attempting to pickle the AgentExecutor tools. " + "This model likely does not support serialization." + ) from e model_data_kwargs[_TOOLS_DATA_KEY] = _TOOLS_DATA_FILE_NAME else: raise mlflow.MlflowException.invalid_parameter_value( diff --git a/tests/langchain/test_langchain_model_export.py b/tests/langchain/test_langchain_model_export.py index f0cabbb757b41..305420bf70d19 100644 --- a/tests/langchain/test_langchain_model_export.py +++ b/tests/langchain/test_langchain_model_export.py @@ -12,6 +12,7 @@ import pytest import transformers from langchain import SQLDatabase +from langchain.agents import AgentType, initialize_agent from langchain.chains import ( APIChain, ConversationChain, @@ -32,6 +33,7 @@ from langchain.prompts import PromptTemplate from langchain.requests import TextRequestsWrapper from langchain.text_splitter import CharacterTextSplitter +from langchain.tools import Tool from langchain.vectorstores import FAISS from langchain_experimental.sql import SQLDatabaseChain from packaging import version @@ -736,3 +738,29 @@ def test_unsupported_class(): ): with mlflow.start_run(): mlflow.langchain.log_model(llm, "fake_llm") + + +def test_agent_with_unpicklable_tools(tmp_path): + tmp_file = tmp_path / "temp_file.txt" + with open(tmp_file, mode="w") as temp_file: + # files that aren't opened for reading cannot be pickled + tools = [ + Tool.from_function( + func=lambda: temp_file, + name="Write 0", + description="If you need to write 0 to a file", + ) + ] + agent = initialize_agent( + llm=OpenAI(temperature=0), tools=tools, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION + ) + + with pytest.raises( + MlflowException, + match=( + "Error when attempting to pickle the AgentExecutor tools. " + "This model likely does not support serialization." 
+ ), + ): + with mlflow.start_run(): + mlflow.langchain.log_model(agent, "unpicklable_tools") From e41887d90de87c0e8b9e1516949615341a305550 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 18:08:47 +0900 Subject: [PATCH 066/101] Update devcontainer image and extensions (#10096) Signed-off-by: harupy --- .devcontainer/devcontainer.json | 5 + .devcontainer/pip-compile.sh | 6 +- .devcontainer/requirements.txt | 625 ++++++++++++++++++-------- requirements/doc-min-requirements.txt | 8 + requirements/doc-requirements.txt | 10 +- 5 files changed, 460 insertions(+), 194 deletions(-) create mode 100644 requirements/doc-min-requirements.txt diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json index d2d7db5e7eca5..02982c55c7286 100644 --- a/.devcontainer/devcontainer.json +++ b/.devcontainer/devcontainer.json @@ -20,12 +20,17 @@ "prettier.configPath": "/workspaces/mlflow/mlflow/server/js/.prettierrc.js" }, "extensions": [ + "charliermarsh.ruff", "dbaeumer.vscode-eslint", "eamodio.gitlens", "esbenp.prettier-vscode", + "GitHub.copilot", + "GitHub.copilot-chat", "GitHub.vscode-pull-request-github", "ms-azuretools.vscode-docker", + "ms-python.black-formatter", "ms-python.python", + "ms-toolsai.jupyter", "oderwat.indent-rainbow", "PKief.material-icon-theme", "ritwickdey.LiveServer", diff --git a/.devcontainer/pip-compile.sh b/.devcontainer/pip-compile.sh index 698b9fa514d2c..1312b8b0db821 100755 --- a/.devcontainer/pip-compile.sh +++ b/.devcontainer/pip-compile.sh @@ -5,13 +5,15 @@ set -ex pip install pip-tools cd requirements +echo ipykernel >> /tmp/requirements.txt pip-compile --verbose \ --output-file /tmp/output.txt \ skinny-requirements.txt \ core-requirements.txt \ - doc-requirements.txt \ + doc-min-requirements.txt \ test-requirements.txt \ - lint-requirements.txt + lint-requirements.txt \ + /tmp/requirements.txt # Add a timestamp at the beginning of the file echo "# Created at: $(date -u +"%F %T %Z")" | cat - /tmp/output.txt > /tmp/requirements.txt diff --git a/.devcontainer/requirements.txt b/.devcontainer/requirements.txt index 6387ce917dd3f..8db32b7b2b795 100644 --- a/.devcontainer/requirements.txt +++ b/.devcontainer/requirements.txt @@ -1,419 +1,632 @@ -# Created at: 2022-11-16 04:25:48 UTC +# Created at: 2023-10-24 08:12:52 UTC # -# This file is autogenerated by pip-compile with python 3.8 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.8 +# by the following command: # -# pip-compile --output-file=/tmp/output.txt core-requirements.txt doc-requirements.txt lint-requirements.txt skinny-requirements.txt test-requirements.txt +# pip-compile --output-file=/tmp/output.txt /tmp/requirements.txt core-requirements.txt doc-min-requirements.txt lint-requirements.txt skinny-requirements.txt test-requirements.txt # -alabaster==0.7.12 +absl-py==2.0.0 + # via rouge-score +aiofiles==23.2.1 + # via mlserver +aiohttp==3.8.6 + # via + # datasets + # fsspec + # tritonclient +aiokafka==0.8.1 + # via mlserver +aiosignal==1.3.1 + # via aiohttp +alabaster==0.7.13 # via sphinx -alembic==1.8.1 - # via -r core-requirements.txt +alembic==1.12.0 + # via + # -r core-requirements.txt + # mlflow +anyio==4.0.0 + # via starlette astroid==2.11.7 # via pylint -asttokens==2.1.0 +asttokens==2.4.0 # via stack-data -attrs==22.1.0 - # via pytest -azure-core==1.26.1 +async-timeout==4.0.3 + # via + # aiohttp + # aiokafka +attrs==23.1.0 + # via aiohttp +azure-core==1.29.5 # via # azure-identity # azure-storage-blob - # msrest -azure-identity==1.12.0 + # 
azure-storage-file-datalake +azure-identity==1.14.1 # via -r test-requirements.txt -azure-storage-blob==12.14.1 +azure-storage-blob==12.18.3 + # via + # -r test-requirements.txt + # azure-storage-file-datalake +azure-storage-file-datalake==12.13.2 # via -r test-requirements.txt -babel==2.11.0 +babel==2.13.0 # via sphinx backcall==0.2.0 # via ipython -black[jupyter]==22.3.0 +black[jupyter]==23.7.0 + # via + # -r lint-requirements.txt + # black + # blacken-docs +blacken-docs==1.16.0 # via -r lint-requirements.txt -boto3==1.26.10 +boto3==1.28.69 # via moto -botocore==1.29.10 +botocore==1.31.69 # via # boto3 # moto # s3transfer -certifi==2022.12.7 +brotli==1.1.0 + # via geventhttpclient +certifi==2023.7.22 # via - # msrest + # geventhttpclient # requests -cffi==1.15.1 +cffi==1.16.0 # via cryptography -cfgv==3.3.1 +cfgv==3.4.0 # via pre-commit -charset-normalizer==2.1.1 - # via requests -click==8.1.3 +charset-normalizer==3.3.1 + # via + # aiohttp + # requests +click==8.1.7 # via # -r skinny-requirements.txt # black # databricks-cli # flask + # mlflow + # mlserver + # nltk # sphinx-click # typer -cloudpickle==2.2.0 + # uvicorn +cloudpickle==2.2.1 # via # -r skinny-requirements.txt # hyperopt + # mlflow # shap colorama==0.4.6 # via # sphinx-autobuild # typer +comm==0.1.4 + # via + # ipykernel + # ipywidgets commonmark==0.9.1 # via rich -contourpy==1.0.6 +contourpy==1.1.1 # via matplotlib -coverage[toml]==6.5.0 - # via pytest-cov -cryptography==38.0.3 +coverage[toml]==7.3.2 + # via + # coverage + # pytest-cov +cryptography==41.0.4 # via # azure-identity # azure-storage-blob # moto # msal # pyjwt -cycler==0.11.0 +cuda-python==12.3.0 + # via tritonclient +cycler==0.12.1 # via matplotlib databricks-cli @ git+https://github.com/databricks/databricks-cli.git # via # -r skinny-requirements.txt # -r test-requirements.txt + # mlflow +databricks-sdk==0.11.0 + # via -r test-requirements.txt +datasets==2.14.6 + # via evaluate +debugpy==1.8.0 + # via ipykernel decorator==5.1.1 # via ipython -dill==0.3.6 - # via pylint -distlib==0.3.6 +dill==0.3.7 + # via + # datasets + # evaluate + # multiprocess + # pylint +distlib==0.3.7 # via virtualenv -docker==6.0.1 - # via -r core-requirements.txt +docker==6.1.3 + # via + # -r core-requirements.txt + # mlflow docutils==0.16 # via # rstcheck-core # sphinx # sphinx-click entrypoints==0.4 - # via -r skinny-requirements.txt -exceptiongroup==1.0.4 - # via pytest -executing==1.2.0 + # via + # -r skinny-requirements.txt + # mlflow +evaluate==0.4.1 + # via -r test-requirements.txt +exceptiongroup==1.1.3 + # via + # anyio + # pytest +executing==2.0.0 # via stack-data -filelock==3.8.0 +fastapi==0.89.1 + # via mlserver +filelock==3.12.4 # via # huggingface-hub - # transformers # virtualenv -flaml==1.0.13 +flaml[automl]==2.1.1 # via -r test-requirements.txt -flask==2.2.2 - # via -r core-requirements.txt -fonttools==4.38.0 +flask==2.2.5 + # via + # -r core-requirements.txt + # -r doc-min-requirements.txt + # mlflow +fonttools==4.43.1 # via matplotlib -future==0.18.2 +frozenlist==1.4.0 + # via + # aiohttp + # aiosignal +fsspec[http]==2023.10.0 + # via + # datasets + # evaluate + # huggingface-hub +future==0.18.3 # via hyperopt -gitdb==4.0.9 +gevent==23.9.1 + # via geventhttpclient +geventhttpclient==2.0.2 + # via tritonclient +gitdb==4.0.11 # via gitpython -gitpython==3.1.32 - # via -r skinny-requirements.txt -greenlet==2.0.1 - # via sqlalchemy -gunicorn==20.1.0 ; platform_system != "Windows" - # via -r core-requirements.txt -huggingface-hub==0.10.1 - # via transformers 
+gitpython==3.1.40 + # via + # -r skinny-requirements.txt + # mlflow +greenlet==3.0.0 + # via + # gevent + # sqlalchemy +grpcio==1.59.0 + # via + # mlserver + # py-grpc-prometheus +gunicorn==21.2.0 ; platform_system != "Windows" + # via + # -r core-requirements.txt + # mlflow +h11==0.14.0 + # via uvicorn +huggingface-hub==0.18.0 + # via + # -r test-requirements.txt + # datasets + # evaluate hyperopt==0.2.7 # via -r test-requirements.txt -identify==2.5.8 +identify==2.5.30 # via pre-commit idna==3.4 - # via requests + # via + # anyio + # requests + # yarl imagesize==1.4.1 # via sphinx -importlib-metadata==5.0.0 +importlib-metadata==6.8.0 # via # -r skinny-requirements.txt # alembic # flask + # jupyter-client # markdown + # mlflow # numba -importlib-resources==5.10.0 - # via alembic -iniconfig==1.1.1 +importlib-resources==6.1.0 + # via + # alembic + # matplotlib + # mlserver +iniconfig==2.0.0 # via pytest -ipython==8.6.0 +ipykernel==6.25.2 + # via -r /tmp/requirements.txt +ipython==8.12.3 # via # -r test-requirements.txt # black + # ipykernel + # ipywidgets +ipywidgets==8.1.1 + # via -r test-requirements.txt isodate==0.6.1 - # via msrest -isort==5.10.1 + # via + # azure-storage-blob + # azure-storage-file-datalake +isort==5.12.0 # via pylint itsdangerous==2.1.2 # via flask -jedi==0.18.1 +jedi==0.19.1 # via ipython jinja2==3.0.3 ; platform_system != "Windows" # via # -r core-requirements.txt - # -r doc-requirements.txt + # -r doc-min-requirements.txt # flask + # mlflow # moto # sphinx jmespath==1.0.1 # via # boto3 # botocore -joblib==1.2.0 - # via scikit-learn +joblib==1.3.2 + # via + # nltk + # scikit-learn +jupyter-client==8.4.0 + # via ipykernel +jupyter-core==5.4.0 + # via + # ipykernel + # jupyter-client +jupyterlab-widgets==3.0.9 + # via ipywidgets +kafka-python==2.0.2 + # via aiokafka kaleido==0.2.1 # via -r test-requirements.txt -kiwisolver==1.4.4 +kiwisolver==1.4.5 # via matplotlib -lazy-object-proxy==1.8.0 +lazy-object-proxy==1.9.0 # via astroid -lightgbm==3.3.3 +lightgbm==4.1.0 # via flaml livereload==2.6.3 # via sphinx-autobuild -llvmlite==0.39.1 +llvmlite==0.41.1 # via numba mako==1.2.4 # via alembic -markdown==3.4.1 - # via -r core-requirements.txt -markupsafe==2.1.1 +markdown==3.5 + # via + # -r core-requirements.txt + # mlflow +markupsafe==2.1.3 # via # jinja2 # mako - # moto # werkzeug -matplotlib==3.6.2 - # via -r core-requirements.txt +matplotlib==3.7.3 + # via + # -r core-requirements.txt + # mlflow matplotlib-inline==0.1.6 - # via ipython + # via + # ipykernel + # ipython mccabe==0.7.0 # via pylint -moto==4.0.9 +mlflow==2.7.1 + # via mlserver-mlflow +mlserver==1.3.5 + # via + # -r test-requirements.txt + # mlserver-mlflow +mlserver-mlflow==1.3.5 # via -r test-requirements.txt -msal==1.20.0 +moto==4.2.4 + # via -r test-requirements.txt +msal==1.24.1 # via # azure-identity # msal-extensions msal-extensions==1.0.0 # via azure-identity -msrest==0.7.1 - # via azure-storage-blob -mypy-extensions==0.4.3 +multidict==6.0.4 + # via + # aiohttp + # yarl +multiprocess==0.70.15 + # via + # datasets + # evaluate +mypy-extensions==1.0.0 # via black -networkx==2.8.8 +nest-asyncio==1.5.8 + # via ipykernel +networkx==3.1 # via hyperopt -nodeenv==1.7.0 +nltk==3.8.1 + # via + # -r test-requirements.txt + # rouge-score +nodeenv==1.8.0 # via pre-commit -numba==0.56.4 +numba==0.58.1 # via shap -numpy==1.23.4 +numpy==1.24.4 # via # -r core-requirements.txt # contourpy + # datasets + # evaluate # flaml # hyperopt # lightgbm # matplotlib + # mlflow + # mlserver # numba # pandas # pyarrow + # 
rouge-score # scikit-learn # scipy # shap - # transformers + # tritonclient # xgboost oauthlib==3.2.2 - # via - # databricks-cli - # requests-oauthlib -packaging==21.3 + # via databricks-cli +orjson==3.9.9 + # via mlserver +packaging==23.2 # via # -r skinny-requirements.txt + # aiokafka + # black + # datasets # docker + # evaluate + # gunicorn # huggingface-hub + # ipykernel # matplotlib + # mlflow + # plotly # pytest # shap # sphinx - # transformers -pandas==1.5.1 +pandas==2.0.3 # via # -r core-requirements.txt + # datasets + # evaluate # flaml + # mlflow + # mlserver # shap parso==0.8.3 # via jedi -pathspec==0.10.2 +pathspec==0.11.2 # via black pexpect==4.8.0 # via ipython pickleshare==0.7.5 # via ipython -pillow==9.3.0 +pillow==10.1.0 # via # -r test-requirements.txt # matplotlib -platformdirs==2.5.4 +platformdirs==3.11.0 # via # black + # jupyter-core # pylint # virtualenv -plotly==5.11.0 +plotly==5.17.0 # via -r test-requirements.txt -pluggy==1.0.0 +pluggy==1.3.0 # via pytest -portalocker==2.6.0 +portalocker==2.8.2 # via msal-extensions pre-commit==2.20.0 # via -r lint-requirements.txt -prompt-toolkit==3.0.32 +prometheus-client==0.17.1 + # via + # py-grpc-prometheus + # starlette-exporter +prompt-toolkit==3.0.39 # via ipython -protobuf==4.21.9 - # via -r skinny-requirements.txt +protobuf==4.24.4 + # via + # -r skinny-requirements.txt + # mlflow + # mlserver +psutil==5.9.6 + # via + # -r core-requirements.txt + # ipykernel ptyprocess==0.7.0 # via pexpect pure-eval==0.2.2 # via stack-data +py-grpc-prometheus==0.7.0 + # via mlserver py4j==0.10.9.7 # via hyperopt -pyarrow==10.0.0 - # via -r core-requirements.txt +pyarrow==13.0.0 + # via + # -r core-requirements.txt + # datasets + # mlflow pycparser==2.21 # via cffi -pydantic==1.10.2 - # via rstcheck-core -pygments==2.15.0 +pydantic==1.10.13 + # via + # fastapi + # rstcheck-core +pygments==2.16.1 # via # ipython # rich # sphinx -pyjwt[crypto]==2.6.0 +pyjwt[crypto]==2.8.0 # via # databricks-cli # msal pylint==2.14.4 # via -r lint-requirements.txt -pyparsing==3.0.9 - # via - # matplotlib - # packaging -pytest==7.2.0 +pyparsing==3.1.1 + # via matplotlib +pyphen==0.14.0 + # via textstat +pytest==7.4.2 # via # -r test-requirements.txt # pytest-cov -pytest-cov==4.0.0 + # pytest-timeout +pytest-cov==4.1.0 # via -r test-requirements.txt pytest-localserver==0.5.0 # via -r test-requirements.txt +pytest-timeout==2.2.0 + # via -r test-requirements.txt python-dateutil==2.8.2 # via # botocore + # jupyter-client # matplotlib # moto # pandas -pytz==2022.6 +python-dotenv==1.0.0 + # via mlserver +python-rapidjson==1.12 + # via tritonclient +pytz==2023.3.post1 # via # -r skinny-requirements.txt # babel - # moto + # mlflow # pandas -pyyaml==6.0 +pyyaml==6.0.1 # via # -r skinny-requirements.txt + # datasets # huggingface-hub + # mlflow # pre-commit - # transformers +pyzmq==25.1.1 + # via + # ipykernel + # jupyter-client querystring-parser==1.2.4 - # via -r core-requirements.txt -regex==2022.10.31 - # via transformers -requests==2.28.1 + # via + # -r core-requirements.txt + # mlflow +regex==2023.10.3 + # via + # nltk + # tiktoken +requests==2.31.0 # via # -r skinny-requirements.txt # azure-core # databricks-cli + # databricks-sdk + # datasets # docker + # evaluate + # fsspec # huggingface-hub + # mlflow # moto # msal - # msrest - # requests-oauthlib # responses # sphinx - # transformers -requests-oauthlib==1.3.1 - # via msrest -responses==0.22.0 - # via moto + # tiktoken +responses==0.18.0 + # via + # evaluate + # moto rich==12.6.0 # via typer 
+rouge-score==0.1.2 + # via -r test-requirements.txt rstcheck==6.1.1 # via -r lint-requirements.txt rstcheck-core==1.0.3 # via rstcheck -s3transfer==0.6.0 +ruff==0.0.292 + # via -r lint-requirements.txt +s3transfer==0.7.0 # via boto3 -scikit-learn==1.1.3 +scikit-learn==1.3.2 # via # -r core-requirements.txt # flaml - # lightgbm + # mlflow # shap -scipy==1.9.3 +scipy==1.10.1 # via # -r core-requirements.txt # flaml # hyperopt # lightgbm + # mlflow # scikit-learn # shap # xgboost -shap==0.41.0 - # via - # -r core-requirements.txt - # -r test-requirements.txt -shellingham==1.5.0 +shap==0.43.0 + # via -r test-requirements.txt +shellingham==1.5.4 # via typer six==1.16.0 # via # asttokens # azure-core - # azure-identity # databricks-cli + # geventhttpclient # hyperopt # isodate # livereload # python-dateutil # querystring-parser + # rouge-score slicer==0.0.7 # via shap -smmap==5.0.0 +smmap==5.0.1 # via gitdb +sniffio==1.3.0 + # via anyio snowballstemmer==2.2.0 # via sphinx sphinx==3.5.4 # via - # -r doc-requirements.txt + # -r doc-min-requirements.txt # sphinx-autobuild # sphinx-click sphinx-autobuild==2021.3.14 - # via -r doc-requirements.txt -sphinx-click==4.3.0 - # via -r doc-requirements.txt -sphinxcontrib-applehelp==1.0.2 + # via -r doc-min-requirements.txt +sphinx-click==5.0.1 + # via -r doc-min-requirements.txt +sphinxcontrib-applehelp==1.0.4 # via sphinx sphinxcontrib-devhelp==1.0.2 # via sphinx -sphinxcontrib-htmlhelp==2.0.0 +sphinxcontrib-htmlhelp==2.0.1 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx @@ -421,94 +634,140 @@ sphinxcontrib-qthelp==1.0.3 # via sphinx sphinxcontrib-serializinghtml==1.1.5 # via sphinx -sqlalchemy==1.4.44 +sqlalchemy==2.0.22 # via # -r core-requirements.txt # alembic -sqlparse==0.4.3 - # via -r skinny-requirements.txt -stack-data==0.6.1 + # mlflow +sqlparse==0.4.4 + # via + # -r skinny-requirements.txt + # mlflow +stack-data==0.6.3 # via ipython +starlette==0.22.0 + # via + # fastapi + # starlette-exporter +starlette-exporter==0.16.0 + # via mlserver tabulate==0.9.0 # via databricks-cli -tenacity==8.1.0 +tenacity==8.2.3 # via plotly -threadpoolctl==3.1.0 +textstat==0.7.3 + # via -r test-requirements.txt +threadpoolctl==3.2.0 # via scikit-learn -tokenize-rt==5.0.0 +tiktoken==0.5.1 + # via -r test-requirements.txt +tokenize-rt==5.2.0 # via black -tokenizers==0.13.2 - # via transformers toml==0.10.2 - # via - # pre-commit - # responses + # via pre-commit tomli==2.0.1 # via # black # coverage # pylint # pytest -tomlkit==0.11.6 +tomlkit==0.12.1 # via pylint tornado==6.3.3 - # via livereload -tqdm==4.64.1 # via + # ipykernel + # jupyter-client + # livereload +tqdm==4.66.1 + # via + # -r test-requirements.txt + # datasets + # evaluate # huggingface-hub # hyperopt + # nltk # shap - # transformers -traitlets==5.5.0 +traitlets==5.11.2 # via + # comm + # ipykernel # ipython + # ipywidgets + # jupyter-client + # jupyter-core # matplotlib-inline -transformers==4.24.0 - # via -r test-requirements.txt +tritonclient[http]==2.38.0 + # via + # mlserver + # tritonclient typer[all]==0.7.0 - # via rstcheck -types-docutils==0.19.1.1 + # via + # rstcheck + # typer +types-docutils==0.19.1.9 # via rstcheck-core -types-toml==0.10.8.1 - # via responses -typing-extensions==4.4.0 +typing-extensions==4.8.0 # via + # alembic # astroid # azure-core + # azure-storage-blob + # azure-storage-file-datalake # black # huggingface-hub + # ipython # pydantic # pylint # rich -urllib3==1.26.12 + # sqlalchemy + # starlette + # uvicorn +tzdata==2023.3 + # via pandas +urllib3==1.26.18 # via # 
botocore + # databricks-cli # docker # requests # responses -virtualenv==20.16.7 +uvicorn==0.23.2 + # via mlserver +uvloop==0.19.0 + # via mlserver +virtualenv==20.24.6 # via pre-commit -wcwidth==0.2.5 +wcwidth==0.2.8 # via prompt-toolkit -websocket-client==1.4.2 +websocket-client==1.6.4 # via docker -werkzeug==2.2.2 +werkzeug==3.0.0 # via # flask # moto # pytest-localserver -wheel==0.38.4 - # via lightgbm -wrapt==1.14.1 +widgetsnbextension==4.0.9 + # via ipywidgets +wrapt==1.15.0 # via astroid -xgboost==1.7.1 +xgboost==2.0.0 # via flaml xmltodict==0.13.0 # via moto -zipp==3.10.0 +xxhash==3.4.1 + # via + # datasets + # evaluate +yarl==1.9.2 + # via aiohttp +zipp==3.17.0 # via # importlib-metadata # importlib-resources +zope-event==5.0 + # via gevent +zope-interface==6.1 + # via gevent # The following packages are considered to be unsafe in a requirements file: # pip diff --git a/requirements/doc-min-requirements.txt b/requirements/doc-min-requirements.txt new file mode 100644 index 0000000000000..e73f9d360fa18 --- /dev/null +++ b/requirements/doc-min-requirements.txt @@ -0,0 +1,8 @@ +# sphinx >= 4.0.0 is incompatible with our custom CSS styles and renders the documents improperly. +# See https://github.com/mlflow/mlflow/pull/4480 +sphinx==3.5.4 +jinja2==3.0.3 +# to be compatible with jinja2==3.0.3 +flask<=2.2.5 +sphinx-autobuild +sphinx-click diff --git a/requirements/doc-requirements.txt b/requirements/doc-requirements.txt index 648d5c98a1fc4..5327d4dde51c6 100644 --- a/requirements/doc-requirements.txt +++ b/requirements/doc-requirements.txt @@ -1,12 +1,4 @@ -# Dev/Deployment -# sphinx >= 4.0.0 is incompatible with our custom CSS styles and renders the documents improperly. -# See https://github.com/mlflow/mlflow/pull/4480 -sphinx==3.5.4 -jinja2==3.0.3 -# to be compatible with jinja2==3.0.3 -flask<=2.2.5 -sphinx-autobuild -sphinx-click +-r doc-min-requirements.txt tensorflow-cpu<=2.12.0 pyspark datasets From f6fd22e672b1892a21309c63f91a5f9c77861a95 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 18:10:59 +0900 Subject: [PATCH 067/101] Fix MPD (#10089) Signed-off-by: harupy --- mlflow/store/artifact/cloud_artifact_repo.py | 21 +-- .../databricks_models_artifact_repo.py | 26 ++-- mlflow/utils/download_cloud_file_chunk.py | 33 +---- mlflow/utils/file_utils.py | 134 +++++++----------- mlflow/utils/request_utils.py | 2 +- .../test_databricks_models_artifact_repo.py | 31 ++-- 6 files changed, 99 insertions(+), 148 deletions(-) diff --git a/mlflow/store/artifact/cloud_artifact_repo.py b/mlflow/store/artifact/cloud_artifact_repo.py index dc04759563ede..0ddfdf57e18bf 100644 --- a/mlflow/store/artifact/cloud_artifact_repo.py +++ b/mlflow/store/artifact/cloud_artifact_repo.py @@ -210,21 +210,22 @@ def _parallelized_download_from_cloud(self, file_size, remote_file_path, local_p env=parallel_download_subproc_env, headers=self._extract_headers_from_credentials(cloud_credential_info.headers), ) - if any(not e.retryable for e in failed_downloads.values()): - template = "===== Chunk {index} =====\n{error}" - failure = "\n".join( - template.format(index=index, error=error) - for index, error in failed_downloads.items() - ) - raise MlflowException(f"Failed to download artifact {remote_file_path}:\n{failure}") if failed_downloads: new_cloud_creds = self._get_read_credential_infos([remote_file_path])[0] new_signed_uri = new_cloud_creds.signed_uri new_headers = self._extract_headers_from_credentials(new_cloud_creds.headers) - - for i in failed_downloads: - download_chunk(i, 
_DOWNLOAD_CHUNK_SIZE, new_headers, local_path, new_signed_uri) + for chunk in failed_downloads: + _logger.warning( + f"Retrying download of chunk {chunk.index} of {remote_file_path}" + ) + download_chunk( + range_start=chunk.start, + range_end=chunk.end, + headers=new_headers, + download_path=local_path, + http_uri=new_signed_uri, + ) def _download_file(self, remote_file_path, local_path): # list_artifacts API only returns a list of FileInfos at the specified path diff --git a/mlflow/store/artifact/databricks_models_artifact_repo.py b/mlflow/store/artifact/databricks_models_artifact_repo.py index 8db9a5f892f94..7349fa9e999d0 100644 --- a/mlflow/store/artifact/databricks_models_artifact_repo.py +++ b/mlflow/store/artifact/databricks_models_artifact_repo.py @@ -157,23 +157,23 @@ def _parallelized_download_from_cloud( env=parallel_download_subproc_env, headers=headers, ) - if any(not e.retryable for e in failed_downloads.values()): - template = "===== Chunk {index} =====\n{error}" - failure = "\n".join( - template.format(index=index, error=error) - for index, error in failed_downloads.items() - ) - raise MlflowException( - f"Failed to download artifact {dst_run_relative_artifact_path}:\n{failure}" - ) if failed_downloads: new_signed_uri, new_headers = self._get_signed_download_uri( dst_run_relative_artifact_path ) - for i in failed_downloads: - download_chunk( - i, _DOWNLOAD_CHUNK_SIZE, new_headers, dst_local_file_path, new_signed_uri - ) + new_headers = self._extract_headers_from_signed_url(new_headers) + for chunk in failed_downloads: + _logger.warning( + f"Retrying download of chunk {chunk.index} of " + f"{dst_run_relative_artifact_path}" + ) + download_chunk( + range_start=chunk.start, + range_end=chunk.end, + headers=new_headers, + download_path=dst_local_file_path, + http_uri=new_signed_uri, + ) def _download_file(self, remote_file_path, local_path): try: diff --git a/mlflow/utils/download_cloud_file_chunk.py b/mlflow/utils/download_cloud_file_chunk.py index 79ba106e6faeb..b2fd5d572e77e 100644 --- a/mlflow/utils/download_cloud_file_chunk.py +++ b/mlflow/utils/download_cloud_file_chunk.py @@ -7,8 +7,6 @@ import os import sys -from requests.exceptions import ChunkedEncodingError, ConnectionError, HTTPError - def parse_args(): parser = argparse.ArgumentParser() @@ -17,7 +15,6 @@ def parse_args(): parser.add_argument("--headers", required=True, type=str) parser.add_argument("--download-path", required=True, type=str) parser.add_argument("--http-uri", required=True, type=str) - parser.add_argument("--temp-file", required=True, type=str) return parser.parse_args() @@ -32,29 +29,13 @@ def main(): download_chunk = module.download_chunk args = parse_args() - - try: - download_chunk( - range_start=args.range_start, - range_end=args.range_end, - headers=json.loads(args.headers), - download_path=args.download_path, - http_uri=args.http_uri, - ) - except (ConnectionError, ChunkedEncodingError): - with open(args.temp_file, "w") as f: - json.dump({"retryable": True}, f) - raise - except HTTPError as e: - with open(args.temp_file, "w") as f: - json.dump( - { - "retryable": e.response.status_code in (401, 403, 408), - "status_code": e.response.status_code, - }, - f, - ) - raise + download_chunk( + range_start=args.range_start, + range_end=args.range_end, + headers=json.loads(args.headers), + download_path=args.download_path, + http_uri=args.http_uri, + ) if __name__ == "__main__": diff --git a/mlflow/utils/file_utils.py b/mlflow/utils/file_utils.py index f59e3f3033529..73c7c192147b1 100644 --- 
a/mlflow/utils/file_utils.py +++ b/mlflow/utils/file_utils.py @@ -19,8 +19,8 @@ import uuid from concurrent.futures import as_completed from contextlib import contextmanager +from dataclasses import dataclass from subprocess import CalledProcessError, TimeoutExpired -from typing import Optional from urllib.parse import unquote from urllib.request import pathname2url @@ -38,7 +38,7 @@ MLFLOW_DOWNLOAD_CHUNK_TIMEOUT, MLFLOW_ENABLE_ARTIFACTS_PROGRESS_BAR, ) -from mlflow.exceptions import MissingConfigException +from mlflow.exceptions import MissingConfigException, MlflowException from mlflow.protos.databricks_artifacts_pb2 import ArtifactCredentialType from mlflow.utils import download_cloud_file_chunk, merge_dicts from mlflow.utils.databricks_utils import _get_dbutils @@ -661,16 +661,19 @@ def download_file_using_http_uri(http_uri, download_path, chunk_size=100000000, output_file.write(chunk) -class _ChunkDownloadError(Exception): - def __init__(self, retryable: bool, error: str, status_code: Optional[int] = None) -> None: - self.retryable = retryable - self.error = error - self.status_code = status_code - super().__init__( - f"Chunk download failed: {error}" - if status_code is None - else f"Chunk download failed with status code {status_code}: {error}" - ) +@dataclass(frozen=True) +class _Chunk: + index: int + start: int + end: int + + +def _yield_chunks(file_size, chunk_size): + num_requests = int(math.ceil(file_size / float(chunk_size))) + for i in range(num_requests): + range_start = i * chunk_size + range_end = min(range_start + chunk_size - 1, file_size - 1) + yield _Chunk(i, range_start, range_end) def parallelized_download_file_using_http_uri( @@ -694,81 +697,54 @@ def parallelized_download_file_using_http_uri( Returns a dict of chunk index : exception, if one was thrown for that index. 
""" - def run_download(range_start, range_end): - template = """ + def run_download(chunk: _Chunk): + try: + subprocess.run( + [ + sys.executable, + download_cloud_file_chunk.__file__, + "--range-start", + str(chunk.start), + "--range-end", + str(chunk.end), + "--headers", + json.dumps(headers or {}), + "--download-path", + download_path, + "--http-uri", + http_uri, + ], + text=True, + check=True, + capture_output=True, + timeout=MLFLOW_DOWNLOAD_CHUNK_TIMEOUT.get(), + env=env, + ) + except (TimeoutExpired, CalledProcessError) as e: + raise MlflowException( + f""" ----- stdout ----- -{stdout} +{e.stdout.strip()} ----- stderr ----- -{stderr} +{e.stderr.strip()} """ - with tempfile.TemporaryDirectory() as tmpdir: - json_file = os.path.join(tmpdir, "http_error.json") - try: - subprocess.run( - [ - sys.executable, - download_cloud_file_chunk.__file__, - "--range-start", - str(range_start), - "--range-end", - str(range_end), - "--headers", - json.dumps(headers or {}), - "--download-path", - download_path, - "--http-uri", - http_uri, - "--temp-file", - json_file, - ], - text=True, - check=True, - capture_output=True, - timeout=MLFLOW_DOWNLOAD_CHUNK_TIMEOUT.get(), - env=env, - ) - except TimeoutExpired as e: - raise _ChunkDownloadError( - True, - template.format( - stdout=e.stdout.strip() or "(no stdout)", - stderr=e.stderr.strip() or "(no stderr)", - ), - ) from e - except CalledProcessError as e: - retryable = False - status_code = None - if os.path.exists(json_file): - with open(json_file) as f: - data = json.load(f) - retryable = data.get("retryable", False) - status_code = data.get("status_code") - raise _ChunkDownloadError( - retryable, - template.format( - stdout=e.stdout.strip() or "(no stdout)", - stderr=e.stderr.strip() or "(no stderr)", - ), - status_code, - ) from e - except Exception as e: - raise _ChunkDownloadError(False, str(e)) from e + ) from e - num_requests = int(math.ceil(file_size / float(chunk_size))) + chunks = _yield_chunks(file_size, chunk_size) # Create file if it doesn't exist or erase the contents if it does. We should do this here # before sending to the workers so they can each individually seek to their respective positions # and write chunks without overwriting. with open(download_path, "w"): pass - starting_index = 0 if uri_type == ArtifactCredentialType.GCP_SIGNED_URL or uri_type is None: + chunk = next(chunks) # GCP files could be transcoded, in which case the range header is ignored. # Test if this is the case by downloading one chunk and seeing if it's larger than the # requested size. If yes, let that be the file; if not, continue downloading more chunks. 
download_chunk( - range_start=0, - range_end=chunk_size - 1, + range_start=chunk.start, + range_end=chunk.end, headers=headers, download_path=download_path, http_uri=http_uri, @@ -778,24 +754,16 @@ def run_download(range_start, range_end): # so we don't need to consider this here if downloaded_size > chunk_size: return {} - else: - starting_index = 1 - - futures = {} - for i in range(starting_index, num_requests): - range_start = i * chunk_size - range_end = range_start + chunk_size - 1 - futures[thread_pool_executor.submit(run_download, range_start, range_end)] = i + futures = {thread_pool_executor.submit(run_download, chunk): chunk for chunk in chunks} failed_downloads = {} - with ArtifactProgressBar.chunks(file_size, f"Downloading {download_path}", chunk_size) as pbar: for future in as_completed(futures): - index = futures[future] + chunk = futures[future] try: future.result() except Exception: - failed_downloads[index] = future.exception() + failed_downloads[chunk] = future.exception() else: pbar.update() diff --git a/mlflow/utils/request_utils.py b/mlflow/utils/request_utils.py index f1d5687b3c040..713044299f380 100644 --- a/mlflow/utils/request_utils.py +++ b/mlflow/utils/request_utils.py @@ -39,7 +39,7 @@ def augmented_raise_for_status(response): raise e -def download_chunk(range_start, range_end, headers, download_path, http_uri): +def download_chunk(*, range_start, range_end, headers, download_path, http_uri): combined_headers = {**headers, "Range": f"bytes={range_start}-{range_end}"} with cloud_storage_http_request( diff --git a/tests/store/artifact/test_databricks_models_artifact_repo.py b/tests/store/artifact/test_databricks_models_artifact_repo.py index 210090de2cd38..002feabcd69fd 100644 --- a/tests/store/artifact/test_databricks_models_artifact_repo.py +++ b/tests/store/artifact/test_databricks_models_artifact_repo.py @@ -1,5 +1,4 @@ import json -import re from unittest import mock from unittest.mock import ANY @@ -13,7 +12,7 @@ _DOWNLOAD_CHUNK_SIZE, DatabricksModelsArtifactRepository, ) -from mlflow.utils.file_utils import _ChunkDownloadError +from mlflow.utils.file_utils import _Chunk DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE = ( "mlflow.store.artifact.databricks_models_artifact_repo" @@ -300,7 +299,7 @@ def test_parallelized_download_file_using_http_uri_with_error_downloads( "signed_uri": "https://my-amazing-signed-uri-to-rule-them-all.com/1234-numbers-yay-567", "headers": [{"name": "header_name", "value": "header_value"}], } - error_downloads = {1: _ChunkDownloadError(False, "Internal Server Error", 500)} + error_downloads = {_Chunk(1, 2, 3): Exception("Internal Server Error")} with mock.patch( DATABRICKS_MODEL_ARTIFACT_REPOSITORY + ".list_artifacts", @@ -314,19 +313,21 @@ def test_parallelized_download_file_using_http_uri_with_error_downloads( ), mock.patch( DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE + ".parallelized_download_file_using_http_uri", return_value=error_downloads, - ): - with pytest.raises( - MlflowException, - match=re.compile( - ( - rf"Failed to download artifact {re.escape(remote_file_path)}:" - r".+Internal Server Error" - ), - re.DOTALL, - ), - ): + ), mock.patch( + DATABRICKS_MODEL_ARTIFACT_REPOSITORY_PACKAGE + ".download_chunk", + side_effect=Exception("Retry failed"), + ) as mock_download_chunk: + with pytest.raises(MlflowException, match="Retry failed"): databricks_model_artifact_repo._download_file(remote_file_path, "") + mock_download_chunk.assert_called_once_with( + range_start=2, + range_end=3, + headers={"header_name": "header_value"}, + 
download_path="", + http_uri="https://my-amazing-signed-uri-to-rule-them-all.com/1234-numbers-yay-567", + ) + @pytest.mark.parametrize( ("remote_file_path"), @@ -342,7 +343,7 @@ def test_parallelized_download_file_using_http_uri_with_failed_downloads( "signed_uri": "https://my-amazing-signed-uri-to-rule-them-all.com/1234-numbers-yay-567", "headers": [{"name": "header_name", "value": "header_value"}], } - failed_downloads = {1: _ChunkDownloadError(True, "Unauthorized", 401)} + failed_downloads = {_Chunk(1, 2, 3): Exception("Internal Server Error")} with mock.patch( DATABRICKS_MODEL_ARTIFACT_REPOSITORY + ".list_artifacts", From 2bba4180c9b66580f1026252da826cc5eb0cdd60 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 18:21:39 +0900 Subject: [PATCH 068/101] Remove tests for sqlalchemy 1.4 (#10097) Signed-off-by: harupy --- .github/workflows/master.yml | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 5035f8c5dde69..ff21ed15ec764 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -168,27 +168,6 @@ jobs: test $err = 0 - - name: Rebuild images with SQLAlchemy < 2.0 - run: | - sed -i 's/sqlalchemy.*/sqlalchemy<2.0/g' requirements/core-requirements.txt - git diff - ./tests/db/compose.sh build --build-arg DEPENDENCIES="$(python setup.py -q dependencies)" - - name: Run tests - run: | - set +e - err=0 - trap 'err=1' ERR - - for service in $(./tests/db/compose.sh config --services | grep '^mlflow-') - do - # Set `--no-TTY` to show container logs on GitHub Actions: - ./tests/db/compose.sh run --rm --no-TTY $service pytest \ - tests/store/tracking/test_sqlalchemy_store.py \ - tests/store/model_registry/test_sqlalchemy_store.py \ - tests/db - done - - test $err = 0 - name: Clean up run: | ./tests/db/compose.sh down --volumes --remove-orphans --rmi all From d38d67c1e4195e988e25a34cfa972b8e195dbe43 Mon Sep 17 00:00:00 2001 From: B-Step62 <31463517+B-Step62@users.noreply.github.com> Date: Tue, 24 Oct 2023 19:35:46 +0900 Subject: [PATCH 069/101] Upgrade Flask MV to 3.0 (#10098) Signed-off-by: B-Step62 <31463517+B-Step62@users.noreply.github.com> --- .github/workflows/requirements.yml | 3 ++- CONTRIBUTING.md | 5 +++-- mlflow/server/__init__.py | 3 +-- requirements/core-requirements.txt | 2 +- requirements/core-requirements.yaml | 2 +- 5 files changed, 8 insertions(+), 7 deletions(-) diff --git a/.github/workflows/requirements.yml b/.github/workflows/requirements.yml index 679145e25a0c3..d112ade1d758d 100644 --- a/.github/workflows/requirements.yml +++ b/.github/workflows/requirements.yml @@ -79,4 +79,5 @@ jobs: - name: Run tests run: | source dev/setup-ssh.sh - ./dev/run-python-tests.sh + pytest tests --quiet --requires-ssh --ignore-flavors \ + --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 083ba6b28f826..c4e562cb33771 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -581,7 +581,8 @@ request by running: ```bash pre-commit run --all-files -./dev/run-python-tests.sh +pytest tests --quiet --requires-ssh --ignore-flavors \ + --ignore=tests/examples --ignore=tests/recipes --ignore=tests/evaluate ``` We use [pytest](https://docs.pytest.org/en/latest/contents.html) to run @@ -612,7 +613,7 @@ If you are adding new framework flavor support, you'll need to modify `pytest` and Github action configurations so tests for your code can run properly. Generally, the files you'll have to edit are: -1. 
`dev/run-python-tests.sh`: +1. `.github/workflows/master.yml`: lines where pytest runs with `--ignore-flavors` flag 1. Add your tests to the ignore list, where the other frameworks are ignored diff --git a/mlflow/server/__init__.py b/mlflow/server/__init__.py index 663f98de557e8..43a3ceee20fd6 100644 --- a/mlflow/server/__init__.py +++ b/mlflow/server/__init__.py @@ -7,7 +7,6 @@ import types from flask import Flask, Response, send_from_directory -from flask import __version__ as flask_version from packaging.version import Version from mlflow.exceptions import MlflowException @@ -40,7 +39,7 @@ REL_STATIC_DIR = "js/build" app = Flask(__name__, static_folder=REL_STATIC_DIR) -IS_FLASK_V1 = Version(flask_version) < Version("2.0") +IS_FLASK_V1 = Version(importlib.metadata.version("flask")) < Version("2.0") for http_path, handler, methods in handlers.get_endpoints(): diff --git a/requirements/core-requirements.txt b/requirements/core-requirements.txt index 99f4cdd62ba2d..11f7e3770348b 100644 --- a/requirements/core-requirements.txt +++ b/requirements/core-requirements.txt @@ -4,7 +4,7 @@ alembic<2,!=1.10.0 docker<7,>=4.0.0 -Flask<3 +Flask<4 numpy<2 scipy<2 pandas<3 diff --git a/requirements/core-requirements.yaml b/requirements/core-requirements.yaml index 5447b07c77648..4ffb44c6b6d99 100644 --- a/requirements/core-requirements.yaml +++ b/requirements/core-requirements.yaml @@ -17,7 +17,7 @@ docker: flask: pip_release: Flask - max_major_version: 2 + max_major_version: 3 numpy: pip_release: numpy From dfdfdceffffa8bfa49a6e60a228e13e3fbeb4c20 Mon Sep 17 00:00:00 2001 From: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com> Date: Tue, 24 Oct 2023 22:40:20 +0900 Subject: [PATCH 070/101] Fix failed requirements test workflow (#10102) Signed-off-by: B-Step62 --- .github/workflows/requirements.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/requirements.yml b/.github/workflows/requirements.yml index d112ade1d758d..cc990b41b1ffa 100644 --- a/.github/workflows/requirements.yml +++ b/.github/workflows/requirements.yml @@ -18,6 +18,7 @@ concurrency: cancel-in-progress: true env: + MLFLOW_HOME: /home/runner/work/mlflow/mlflow MLFLOW_CONDA_HOME: /usr/share/miniconda SPARK_LOCAL_IP: localhost PYTHON_VERSION: "3.8" From e0175ca4df8d2d7fef14d11928ba69d8a540a069 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Tue, 24 Oct 2023 23:33:51 +0900 Subject: [PATCH 071/101] Split `recipes-windows` job for faster PR merge (#10100) Signed-off-by: harupy --- .github/workflows/recipe.yml | 7 ++- tests/recipes/test_train_step.py | 79 +++++++++------------------- tests/recipes/test_transform_step.py | 19 +++++-- 3 files changed, 46 insertions(+), 59 deletions(-) diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index 84033b04c24e2..7e8b8618a4140 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -52,6 +52,11 @@ jobs: recipes-windows: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: windows-latest + strategy: + matrix: + group: [1, 2] + include: + - splits: 2 steps: - uses: actions/checkout@v3 with: @@ -82,4 +87,4 @@ jobs: export HADOOP_HOME=/tmp/winutils/hadoop-3.2.2 export PATH=$PATH:$HADOOP_HOME/bin export MLFLOW_HOME=$(pwd) - pytest tests/recipes + pytest --splits ${{ matrix.splits }} --group ${{ matrix.group }} tests/recipes diff --git a/tests/recipes/test_train_step.py b/tests/recipes/test_train_step.py index 149c15ae58f01..e41366a6068ef 100644 --- a/tests/recipes/test_train_step.py +++ 
b/tests/recipes/test_train_step.py @@ -170,8 +170,6 @@ def setup_train_step_with_tuning( def test_train_step(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" @@ -195,7 +193,7 @@ def test_train_step(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path): m_train.estimator_fn = estimator_fn recipe_config = read_yaml(tmp_recipe_root_path, _RECIPE_CONFIG_FILE_NAME) - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step = TrainStep.from_recipe_config(recipe_config, str(tmp_recipe_root_path)) train_step.run(str(train_step_output_dir)) @@ -205,13 +203,25 @@ def test_train_step(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path): assert "training_mean_squared_error" in metrics +@pytest.fixture(autouse=True) +def dummy_train_step(tmp_recipe_root_path, monkeypatch): + # `mock.patch("steps.train.estimator_fn", ...)` would fail without this fixture + steps = tmp_recipe_root_path / "steps" + steps.mkdir(exist_ok=True) + steps.joinpath("train.py").write_text( + """ +def estimator_fn(estimator_params=None): + return None +""" + ) + monkeypatch.syspath_prepend(str(tmp_recipe_root_path)) + + @mock.patch("mlflow.recipes.steps.train._REBALANCING_CUTOFF", 50) def test_train_step_imbalanced_data(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path): train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/multiclass" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" @@ -233,9 +243,7 @@ def test_train_step_imbalanced_data(tmp_recipe_root_path: Path, tmp_recipe_exec_ enabled: false """ ) - m_train = Mock() - m_train.estimator_fn = classifier_estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", classifier_estimator_fn): recipe_config = read_yaml(tmp_recipe_root_path, _RECIPE_CONFIG_FILE_NAME) train_step = TrainStep.from_recipe_config(recipe_config, str(tmp_recipe_root_path)) train_step.run(str(train_step_output_dir)) @@ -254,8 +262,6 @@ def test_train_step_classifier_automl( tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path, recipe ): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path, recipe=recipe) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( """ @@ -364,12 +370,8 @@ def test_train_steps_writes_model_pkl_and_card( tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path, use_tuning ): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, use_tuning) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) assert (train_step_output_dir / "model/python_model.pkl").exists() @@ -389,12 +391,8 @@ def 
test_train_steps_writes_card_with_model_and_run_links_on_databricks( monkeypatch.setenv("_DATABRICKS_WORKSPACE_ID", workspace_id) train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, use_tuning) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) with open(train_step_output_dir / "run_id") as f: @@ -414,12 +412,10 @@ def test_train_steps_writes_card_with_model_and_run_links_on_databricks( @pytest.mark.parametrize("use_tuning", [True, False]) def test_train_steps_autologs(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path, use_tuning): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, use_tuning) m_train = Mock() m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) assert os.path.exists(train_step_output_dir / "run_id") @@ -440,8 +436,6 @@ def test_train_steps_with_correct_tags( ): monkeypatch.setenv(MLFLOW_RECIPES_EXECUTION_TARGET_STEP_NAME.name, "train") train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, use_tuning) m_train = Mock() m_train.estimator_fn = estimator_fn @@ -465,12 +459,8 @@ def test_train_step_with_tuning_best_parameters( tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path ): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, use_tuning=True) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) assert (train_step_output_dir / "best_parameters.yaml").exists() @@ -500,14 +490,10 @@ def test_train_step_with_tuning_output_yaml_correct( num_sections, ): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning( tmp_recipe_root_path, use_tuning=True, with_hardcoded_params=with_hardcoded_params ) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) assert (train_step_output_dir / "best_parameters.yaml").exists() @@ -528,12 +514,8 @@ def test_train_step_with_tuning_child_runs_and_early_stop( tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path ): train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning(tmp_recipe_root_path, 
use_tuning=True) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step.run(str(train_step_output_dir)) with open(train_step_output_dir / "run_id") as f: @@ -591,7 +573,7 @@ def test_automl( monkeypatch.setenv(MLFLOW_RECIPES_EXECUTION_TARGET_STEP_NAME.name, "train") train_step_output_dir = setup_train_dataset(tmp_recipe_exec_path) recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) + recipe_steps_dir.mkdir(exist_ok=True) if generate_custom_metrics: recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") recipe_steps_dir.joinpath("custom_metrics.py").write_text( @@ -618,9 +600,8 @@ def weighted_mean_squared_error(eval_df, builtin_metrics): use_tuning=True, with_hardcoded_params=False, ) - m_train = Mock() - m_train.estimator_fn = estimator_fn - with mock.patch.dict("sys.modules", {"steps.train": m_train}): + + with mock.patch("steps.train.estimator_fn", estimator_fn): train_step._validate_and_apply_step_config() train_step._run(str(train_step_output_dir)) @@ -636,8 +617,6 @@ def test_tuning_multiclass(tmp_recipe_root_path: Path, tmp_recipe_exec_path: Pat train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/multiclass" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) train_step = setup_train_step_with_tuning( tmp_recipe_root_path, @@ -673,8 +652,6 @@ def test_train_step_with_predict_probability( train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/binary" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" @@ -742,8 +719,6 @@ def test_train_step_with_predict_probability_with_custom_prefix( train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/binary" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" @@ -797,8 +772,6 @@ def test_train_step_with_label_encoding(tmp_recipe_root_path: Path, tmp_recipe_e train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/multiclass" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" @@ -849,8 +822,6 @@ def test_train_step_with_probability_calibration( train_step_output_dir = setup_train_dataset( tmp_recipe_exec_path, recipe="classification/binary" ) - recipe_steps_dir = tmp_recipe_root_path.joinpath("steps") - recipe_steps_dir.mkdir(parents=True) recipe_yaml = tmp_recipe_root_path.joinpath(_RECIPE_CONFIG_FILE_NAME) recipe_yaml.write_text( f""" diff --git a/tests/recipes/test_transform_step.py b/tests/recipes/test_transform_step.py index 384842968f7a4..8a1516375be87 100644 --- a/tests/recipes/test_transform_step.py +++ b/tests/recipes/test_transform_step.py @@ -1,7 +1,6 @@ import os from pathlib import Path from unittest import mock -from unittest.mock import Mock import pandas as pd import pytest @@ -15,6 +14,20 @@ from mlflow.utils.file_utils import read_yaml +@pytest.fixture(autouse=True) +def dummy_transform_step(tmp_recipe_root_path, 
monkeypatch): + # `mock.patch("steps.transform.transformer_fn", ...)` would fail without this fixture + steps = tmp_recipe_root_path / "steps" + steps.mkdir(exist_ok=True) + steps.joinpath("transform.py").write_text( + """ +def transformer_fn(estimator_params=None): + return None +""" + ) + monkeypatch.syspath_prepend(str(tmp_recipe_root_path)) + + # Sets up the transform step and returns the constructed TransformStep instance and step output dir def set_up_transform_step(recipe_root: Path, transform_user_module): split_step_output_dir = recipe_root.joinpath("steps", "split", "outputs") @@ -61,10 +74,8 @@ def test_transform_step_writes_onehot_encoded_dataframe_and_transformer_pkl( ): from sklearn.preprocessing import StandardScaler - m = Mock() - m.transformer_fn = lambda: StandardScaler() # pylint: disable=unnecessary-lambda monkeypatch.setenv(MLFLOW_RECIPES_EXECUTION_DIRECTORY.name, str(tmp_recipe_root_path)) - with mock.patch.dict("sys.modules", {"steps.transform": m}): + with mock.patch("steps.transform.transformer_fn", lambda: StandardScaler()): transform_step, transform_step_output_dir, _ = set_up_transform_step( tmp_recipe_root_path, "transformer_fn" ) From fee22640d4dfd445b1ae4034a450575469ea008b Mon Sep 17 00:00:00 2001 From: Jerry Liang <66143562+jerrylian-db@users.noreply.github.com> Date: Tue, 24 Oct 2023 11:14:56 -0700 Subject: [PATCH 072/101] Update database test schemas (#10109) Signed-off-by: Jerry Liang --- tests/db/schemas/mssql.sql | 49 ++++++++++++++++++---- tests/db/schemas/mysql.sql | 50 ++++++++++++++++++---- tests/db/schemas/postgresql.sql | 62 +++++++++++++++++++++------- tests/db/schemas/sqlite.sql | 52 ++++++++++++++++++----- tests/resources/db/latest_schema.sql | 4 +- 5 files changed, 173 insertions(+), 44 deletions(-) diff --git a/tests/db/schemas/mssql.sql b/tests/db/schemas/mssql.sql index 4c033a444d344..9021ebb57aef4 100644 --- a/tests/db/schemas/mssql.sql +++ b/tests/db/schemas/mssql.sql @@ -16,6 +16,24 @@ CREATE TABLE experiments ( ) +CREATE TABLE input_tags ( + input_uuid VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + name VARCHAR(255) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + value VARCHAR(500) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + CONSTRAINT input_tags_pk PRIMARY KEY (input_uuid, name) +) + + +CREATE TABLE inputs ( + input_uuid VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + source_type VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + source_id VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + destination_type VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + destination_id VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + CONSTRAINT inputs_pk PRIMARY KEY (source_type, source_id, destination_type, destination_id) +) + + CREATE TABLE registered_models ( name VARCHAR(256) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, creation_time BIGINT, @@ -25,6 +43,20 @@ CREATE TABLE registered_models ( ) +CREATE TABLE datasets ( + dataset_uuid VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + experiment_id INTEGER NOT NULL, + name VARCHAR(500) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + digest VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + dataset_source_type VARCHAR(36) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + dataset_source VARCHAR COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + dataset_schema VARCHAR COLLATE "SQL_Latin1_General_CP1_CI_AS", + dataset_profile VARCHAR COLLATE "SQL_Latin1_General_CP1_CI_AS", + 
CONSTRAINT dataset_pk PRIMARY KEY (experiment_id, name, digest), + CONSTRAINT "FK__datasets__experi__6477ECF3" FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id) +) + + CREATE TABLE experiment_tags ( key VARCHAR(250) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, value VARCHAR(5000) COLLATE "SQL_Latin1_General_CP1_CI_AS", @@ -52,6 +84,15 @@ CREATE TABLE model_versions ( ) +CREATE TABLE registered_model_aliases ( + alias VARCHAR(256) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + version INTEGER NOT NULL, + name VARCHAR(256) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, + CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), + CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON DELETE CASCADE ON UPDATE CASCADE +) + + CREATE TABLE registered_model_tags ( key VARCHAR(250) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, value VARCHAR(5000) COLLATE "SQL_Latin1_General_CP1_CI_AS", @@ -131,11 +172,3 @@ CREATE TABLE tags ( CONSTRAINT tag_pk PRIMARY KEY (key, run_uuid), CONSTRAINT "FK__tags__run_uuid__412EB0B6" FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) ) - -CREATE TABLE registered_model_aliases ( - name VARCHAR(256) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, - alias VARCHAR(256) COLLATE "SQL_Latin1_General_CP1_CI_AS" NOT NULL, - version INTEGER NOT NULL, - CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), - CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ON DELETE CASCADE -) diff --git a/tests/db/schemas/mysql.sql b/tests/db/schemas/mysql.sql index 31b9cd857da2d..dccf071fe4220 100644 --- a/tests/db/schemas/mysql.sql +++ b/tests/db/schemas/mysql.sql @@ -17,6 +17,24 @@ CREATE TABLE experiments ( ) +CREATE TABLE input_tags ( + input_uuid VARCHAR(36) NOT NULL, + name VARCHAR(255) NOT NULL, + value VARCHAR(500) NOT NULL, + PRIMARY KEY (input_uuid, name) +) + + +CREATE TABLE inputs ( + input_uuid VARCHAR(36) NOT NULL, + source_type VARCHAR(36) NOT NULL, + source_id VARCHAR(36) NOT NULL, + destination_type VARCHAR(36) NOT NULL, + destination_id VARCHAR(36) NOT NULL, + PRIMARY KEY (source_type, source_id, destination_type, destination_id) +) + + CREATE TABLE registered_models ( name VARCHAR(256) NOT NULL, creation_time BIGINT, @@ -26,6 +44,20 @@ CREATE TABLE registered_models ( ) +CREATE TABLE datasets ( + dataset_uuid VARCHAR(36) NOT NULL, + experiment_id INTEGER NOT NULL, + name VARCHAR(500) NOT NULL, + digest VARCHAR(36) NOT NULL, + dataset_source_type VARCHAR(36) NOT NULL, + dataset_source TEXT NOT NULL, + dataset_schema TEXT, + dataset_profile MEDIUMTEXT, + PRIMARY KEY (experiment_id, name, digest), + CONSTRAINT datasets_ibfk_1 FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id) +) + + CREATE TABLE experiment_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -53,6 +85,15 @@ CREATE TABLE model_versions ( ) +CREATE TABLE registered_model_aliases ( + alias VARCHAR(256) NOT NULL, + version INTEGER NOT NULL, + name VARCHAR(256) NOT NULL, + PRIMARY KEY (name, alias), + CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON DELETE CASCADE ON UPDATE CASCADE +) + + CREATE TABLE registered_model_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -138,12 +179,3 @@ CREATE TABLE tags ( PRIMARY KEY (key, run_uuid), CONSTRAINT tags_ibfk_1 FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) ) - - -CREATE TABLE registered_model_aliases ( - name VARCHAR(256) NOT NULL, - 
alias VARCHAR(256) NOT NULL, - version INTEGER NOT NULL, - CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), - CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ON DELETE CASCADE -) diff --git a/tests/db/schemas/postgresql.sql b/tests/db/schemas/postgresql.sql index 223c8cb9b8730..f1134e4d03d94 100644 --- a/tests/db/schemas/postgresql.sql +++ b/tests/db/schemas/postgresql.sql @@ -14,7 +14,25 @@ CREATE TABLE experiments ( last_update_time BIGINT, CONSTRAINT experiment_pk PRIMARY KEY (experiment_id), CONSTRAINT experiments_name_key UNIQUE (name), - CONSTRAINT experiments_lifecycle_stage CHECK ((lifecycle_stage)::text = ANY ((ARRAY['active'::character varying, 'deleted'::character varying])::text[])) + CONSTRAINT experiments_lifecycle_stage CHECK (lifecycle_stage::text = ANY (ARRAY['active'::character varying, 'deleted'::character varying]::text[])) +) + + +CREATE TABLE input_tags ( + input_uuid VARCHAR(36) NOT NULL, + name VARCHAR(255) NOT NULL, + value VARCHAR(500) NOT NULL, + CONSTRAINT input_tags_pk PRIMARY KEY (input_uuid, name) +) + + +CREATE TABLE inputs ( + input_uuid VARCHAR(36) NOT NULL, + source_type VARCHAR(36) NOT NULL, + source_id VARCHAR(36) NOT NULL, + destination_type VARCHAR(36) NOT NULL, + destination_id VARCHAR(36) NOT NULL, + CONSTRAINT inputs_pk PRIMARY KEY (source_type, source_id, destination_type, destination_id) ) @@ -27,6 +45,20 @@ CREATE TABLE registered_models ( ) +CREATE TABLE datasets ( + dataset_uuid VARCHAR(36) NOT NULL, + experiment_id INTEGER NOT NULL, + name VARCHAR(500) NOT NULL, + digest VARCHAR(36) NOT NULL, + dataset_source_type VARCHAR(36) NOT NULL, + dataset_source TEXT NOT NULL, + dataset_schema TEXT, + dataset_profile TEXT, + CONSTRAINT dataset_pk PRIMARY KEY (experiment_id, name, digest), + CONSTRAINT datasets_experiment_id_fkey FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id) +) + + CREATE TABLE experiment_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -54,6 +86,15 @@ CREATE TABLE model_versions ( ) +CREATE TABLE registered_model_aliases ( + alias VARCHAR(256) NOT NULL, + version INTEGER NOT NULL, + name VARCHAR(256) NOT NULL, + CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), + CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON DELETE CASCADE ON UPDATE CASCADE +) + + CREATE TABLE registered_model_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -80,15 +121,15 @@ CREATE TABLE runs ( deleted_time BIGINT, CONSTRAINT run_pk PRIMARY KEY (run_uuid), CONSTRAINT runs_experiment_id_fkey FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id), - CONSTRAINT source_type CHECK ((source_type)::text = ANY ((ARRAY['NOTEBOOK'::character varying, 'JOB'::character varying, 'LOCAL'::character varying, 'UNKNOWN'::character varying, 'PROJECT'::character varying])::text[])), - CONSTRAINT runs_lifecycle_stage CHECK ((lifecycle_stage)::text = ANY ((ARRAY['active'::character varying, 'deleted'::character varying])::text[])), - CONSTRAINT runs_status_check CHECK ((status)::text = ANY ((ARRAY['SCHEDULED'::character varying, 'FAILED'::character varying, 'FINISHED'::character varying, 'RUNNING'::character varying, 'KILLED'::character varying])::text[])) + CONSTRAINT runs_lifecycle_stage CHECK (lifecycle_stage::text = ANY (ARRAY['active'::character varying, 'deleted'::character varying]::text[])), + CONSTRAINT runs_status_check CHECK (status::text = ANY (ARRAY['SCHEDULED'::character 
varying, 'FAILED'::character varying, 'FINISHED'::character varying, 'RUNNING'::character varying, 'KILLED'::character varying]::text[])), + CONSTRAINT source_type CHECK (source_type::text = ANY (ARRAY['NOTEBOOK'::character varying, 'JOB'::character varying, 'LOCAL'::character varying, 'UNKNOWN'::character varying, 'PROJECT'::character varying]::text[])) ) CREATE TABLE latest_metrics ( key VARCHAR(250) NOT NULL, - value DOUBLE_PRECISION NOT NULL, + value DOUBLE PRECISION NOT NULL, timestamp BIGINT, step BIGINT NOT NULL, is_nan BOOLEAN NOT NULL, @@ -100,7 +141,7 @@ CREATE TABLE latest_metrics ( CREATE TABLE metrics ( key VARCHAR(250) NOT NULL, - value DOUBLE_PRECISION NOT NULL, + value DOUBLE PRECISION NOT NULL, timestamp BIGINT NOT NULL, run_uuid VARCHAR(32) NOT NULL, step BIGINT DEFAULT '0'::bigint NOT NULL, @@ -136,12 +177,3 @@ CREATE TABLE tags ( CONSTRAINT tag_pk PRIMARY KEY (key, run_uuid), CONSTRAINT tags_run_uuid_fkey FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) ) - - -CREATE TABLE registered_model_aliases ( - name VARCHAR(256) NOT NULL, - alias VARCHAR(256) NOT NULL, - version INTEGER NOT NULL, - CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), - CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ON DELETE CASCADE -) diff --git a/tests/db/schemas/sqlite.sql b/tests/db/schemas/sqlite.sql index 22162210ef130..2b385dc506822 100644 --- a/tests/db/schemas/sqlite.sql +++ b/tests/db/schemas/sqlite.sql @@ -18,6 +18,24 @@ CREATE TABLE experiments ( ) +CREATE TABLE input_tags ( + input_uuid VARCHAR(36) NOT NULL, + name VARCHAR(255) NOT NULL, + value VARCHAR(500) NOT NULL, + CONSTRAINT input_tags_pk PRIMARY KEY (input_uuid, name) +) + + +CREATE TABLE inputs ( + input_uuid VARCHAR(36) NOT NULL, + source_type VARCHAR(36) NOT NULL, + source_id VARCHAR(36) NOT NULL, + destination_type VARCHAR(36) NOT NULL, + destination_id VARCHAR(36) NOT NULL, + CONSTRAINT inputs_pk PRIMARY KEY (source_type, source_id, destination_type, destination_id) +) + + CREATE TABLE registered_models ( name VARCHAR(256) NOT NULL, creation_time BIGINT, @@ -28,6 +46,20 @@ CREATE TABLE registered_models ( ) +CREATE TABLE datasets ( + dataset_uuid VARCHAR(36) NOT NULL, + experiment_id INTEGER NOT NULL, + name VARCHAR(500) NOT NULL, + digest VARCHAR(36) NOT NULL, + dataset_source_type VARCHAR(36) NOT NULL, + dataset_source TEXT NOT NULL, + dataset_schema TEXT, + dataset_profile TEXT, + CONSTRAINT dataset_pk PRIMARY KEY (experiment_id, name, digest), + FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id) +) + + CREATE TABLE experiment_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -55,6 +87,15 @@ CREATE TABLE model_versions ( ) +CREATE TABLE registered_model_aliases ( + alias VARCHAR(256) NOT NULL, + version INTEGER NOT NULL, + name VARCHAR(256) NOT NULL, + CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), + CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON DELETE CASCADE ON UPDATE CASCADE +) + + CREATE TABLE registered_model_tags ( key VARCHAR(250) NOT NULL, value VARCHAR(5000), @@ -81,8 +122,8 @@ CREATE TABLE runs ( deleted_time BIGINT, CONSTRAINT run_pk PRIMARY KEY (run_uuid), FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id), - CONSTRAINT source_type CHECK (source_type IN ('NOTEBOOK', 'JOB', 'LOCAL', 'UNKNOWN', 'PROJECT')), CONSTRAINT runs_lifecycle_stage CHECK (lifecycle_stage IN ('active', 'deleted')), + CONSTRAINT source_type CHECK 
(source_type IN ('NOTEBOOK', 'JOB', 'LOCAL', 'UNKNOWN', 'PROJECT')), CHECK (status IN ('SCHEDULED', 'FAILED', 'FINISHED', 'RUNNING', 'KILLED')) ) @@ -139,12 +180,3 @@ CREATE TABLE tags ( CONSTRAINT tag_pk PRIMARY KEY (key, run_uuid), FOREIGN KEY(run_uuid) REFERENCES runs (run_uuid) ) - - -CREATE TABLE registered_model_aliases ( - name VARCHAR(256) NOT NULL, - alias VARCHAR(256) NOT NULL, - version INTEGER NOT NULL, - CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), - CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ON DELETE CASCADE -) diff --git a/tests/resources/db/latest_schema.sql b/tests/resources/db/latest_schema.sql index 1c5f9d0715c35..23dd6f13b5383 100644 --- a/tests/resources/db/latest_schema.sql +++ b/tests/resources/db/latest_schema.sql @@ -88,9 +88,9 @@ CREATE TABLE model_versions ( CREATE TABLE registered_model_aliases ( - name VARCHAR(256) NOT NULL, alias VARCHAR(256) NOT NULL, version INTEGER NOT NULL, + name VARCHAR(256) NOT NULL, CONSTRAINT registered_model_alias_pk PRIMARY KEY (name, alias), CONSTRAINT registered_model_alias_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON DELETE CASCADE ON UPDATE CASCADE ) @@ -122,8 +122,8 @@ CREATE TABLE runs ( deleted_time BIGINT, CONSTRAINT run_pk PRIMARY KEY (run_uuid), FOREIGN KEY(experiment_id) REFERENCES experiments (experiment_id), - CONSTRAINT source_type CHECK (source_type IN ('NOTEBOOK', 'JOB', 'LOCAL', 'UNKNOWN', 'PROJECT')), CONSTRAINT runs_lifecycle_stage CHECK (lifecycle_stage IN ('active', 'deleted')), + CONSTRAINT source_type CHECK (source_type IN ('NOTEBOOK', 'JOB', 'LOCAL', 'UNKNOWN', 'PROJECT')), CHECK (status IN ('SCHEDULED', 'FAILED', 'FINISHED', 'RUNNING', 'KILLED')) ) From ccc112d7c10c6086499ee1e7b38afe356c3435f4 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Wed, 25 Oct 2023 03:19:07 +0900 Subject: [PATCH 073/101] Skip `test_train_step_with_probability_calibration` on Windows (#10099) Signed-off-by: harupy --- tests/recipes/test_train_step.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/recipes/test_train_step.py b/tests/recipes/test_train_step.py index e41366a6068ef..c83ed35644e25 100644 --- a/tests/recipes/test_train_step.py +++ b/tests/recipes/test_train_step.py @@ -816,6 +816,10 @@ def test_train_step_with_label_encoding(tmp_recipe_root_path: Path, tmp_recipe_e assert np.array_equal(np.unique(predicted_label), np.array(["a1", "a2", "a3", "b"])) +@pytest.mark.skipif( + os.name == "nt", + reason="Flaky on windows, sometimes fails with `(sqlite3.OperationalError) database is locked`", +) def test_train_step_with_probability_calibration( tmp_recipe_root_path: Path, tmp_recipe_exec_path: Path ): From 4a46a78bf4b95ca023d6146e3395875701edc4e9 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Tue, 24 Oct 2023 11:20:13 -0700 Subject: [PATCH 074/101] Improve make_genai_metric docstring (#10081) Signed-off-by: Ann Zhang --- mlflow/metrics/genai/genai_metric.py | 38 +++++++++++++++------------- 1 file changed, 20 insertions(+), 18 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index a57b0887296ea..baa23028a24b4 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -144,7 +144,7 @@ def make_genai_metric( "its purpose, and its developer. 
It could be more concise for a 5-score.", ), grading_context={ - "ground_truth": ( + "targets": ( "MLflow is an open-source platform for managing " "the end-to-end machine learning (ML) lifecycle. It was developed by " "Databricks, a company that specializes in big data and machine learning " @@ -156,30 +156,32 @@ def make_genai_metric( ) metric = make_genai_metric( - name="correctness", + name="answer_correctness", definition=( - "Correctness refers to how well the generated output matches " - "or aligns with the reference or ground truth text that is considered " - "accurate and appropriate for the given input. The ground truth serves as " - "a benchmark against which the provided output is compared to determine the " - "level of accuracy and fidelity." + "Answer correctness is evaluated on the accuracy of the provided output based on " + "the provided targets, which is the ground truth. Scores can be assigned based on " + "the degree of semantic similarity and factual correctness of the provided output " + "to the provided targets, where a higher score indicates higher degree of accuracy." ), grading_prompt=( - "Correctness: If the answer correctly answer the question, below " - "are the details for different scores: " - "- Score 0: the answer is completely incorrect, doesn’t mention anything about " - "the question or is completely contrary to the correct answer. " - "- Score 1: the answer provides some relevance to the question and answer " - "one aspect of the question correctly. " - "- Score 2: the answer mostly answer the question but is missing or hallucinating " - "on one critical aspect. " - "- Score 4: the answer correctly answer the question and not missing any " - "major aspect" + "Answer correctness: Below are the details for different scores:" + "- Score 1: The output is completely incorrect. It is completely different from " + "or contradicts the provided targets." + "- Score 2: The output demonstrates some degree of semantic similarity and " + "includes partially correct information. However, the output still has significant " + "discrepancies with the provided targets or inaccuracies." + "- Score 3: The output addresses a couple of aspects of the input accurately, " + "aligning with the provided targets. However, there are still omissions or minor " + "inaccuracies." + "- Score 4: The output is mostly correct. It provides mostly accurate information, " + "but there may be one or more minor omissions or inaccuracies." + "- Score 5: The output is correct. It demonstrates a high degree of accuracy and " + "semantic similarity to the targets." 
), examples=[example], version="v1", model="openai:/gpt-4", - grading_context_columns=["ground_truth"], + grading_context_columns=["targets"], parameters={"temperature": 0.0}, aggregations=["mean", "variance", "p90"], greater_is_better=True, From ba274942d59ba8f0bd8314af0663a099666d7d78 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 24 Oct 2023 11:21:48 -0700 Subject: [PATCH 075/101] Logger output when predicting (#10107) Signed-off-by: Prithvi Kannan --- mlflow/models/evaluation/default_evaluator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index 607dae155ebf5..b58c4a6055865 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1288,6 +1288,7 @@ def _generate_model_predictions(self, compute_latency=False): """ Helper method for generating model predictions """ + _logger.info("Computing model predictions.") def predict_with_latency(X_copy): y_pred_list = [] From 3a77b7321a1ec71e2660cfbdb897dc3010f3ca79 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Tue, 24 Oct 2023 12:10:18 -0700 Subject: [PATCH 076/101] Don't try to log metrics with "None" value (#10082) --- mlflow/models/evaluation/default_evaluator.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index b58c4a6055865..c424ee02129da 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1597,10 +1597,11 @@ def _update_metrics(self): for metric_name, metric_value in self.metrics_values.items(): if metric_value.aggregate_results: for agg_name, agg_value in metric_value.aggregate_results.items(): - if agg_name == metric_name.split("/")[0]: - self.metrics[metric_name] = agg_value - else: - self.metrics[f"{metric_name}/{agg_name}"] = agg_value + if agg_value is not None: + if agg_name == metric_name.split("/")[0]: + self.metrics[metric_name] = agg_value + else: + self.metrics[f"{metric_name}/{agg_name}"] = agg_value def _evaluate( self, From f10740b9fb28263d10a75c96cd3f0156bbf3ab98 Mon Sep 17 00:00:00 2001 From: Ann Zhang Date: Tue, 24 Oct 2023 13:21:20 -0700 Subject: [PATCH 077/101] Check if extra metrics have type EvaluationMetric (#10083) Signed-off-by: Ann Zhang --- mlflow/models/evaluation/default_evaluator.py | 16 +++++++++++++++ tests/evaluate/test_default_evaluator.py | 20 +++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index c424ee02129da..f00b4efa76d02 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -25,6 +25,7 @@ from mlflow.entities.metric import Metric from mlflow.exceptions import MlflowException from mlflow.metrics import ( + EvaluationMetric, MetricValue, ari_grade_level, exact_match, @@ -1744,6 +1745,21 @@ def evaluate( if self.extra_metrics is None: self.extra_metrics = [] + bad_metrics = [] + for metric in self.extra_metrics: + if not isinstance(metric, EvaluationMetric): + bad_metrics.append(metric) + if len(bad_metrics) > 0: + message = "\n".join( + [f"- Metric '{m}' has type '{type(m).__name__}'" for m in bad_metrics] + ) + raise MlflowException( + f"In the 'extra_metrics' parameter, the following metrics have the wrong type:\n" + f"{message}\n" + 
f"Please ensure that all extra metrics are instances of " + f"mlflow.metrics.EvaluationMetric." + ) + if self.model_type in (_ModelType.CLASSIFIER, _ModelType.REGRESSOR): inferred_model_type = _infer_model_type_by_labels(self.y) if inferred_model_type is not None and model_type != inferred_model_type: diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index b67a6147179a7..fe8e1570fb3de 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3011,6 +3011,26 @@ def test_multi_output_model_error_handling(): ) +def test_invalid_extra_metrics(): + with mlflow.start_run(): + model_info = mlflow.pyfunc.log_model( + artifact_path="model", python_model=language_model, input_example=["a", "b"] + ) + data = pd.DataFrame({"text": ["Hello world", "My name is MLflow"]}) + with pytest.raises( + MlflowException, + match="Please ensure that all extra metrics are instances of " + "mlflow.metrics.EvaluationMetric.", + ): + mlflow.evaluate( + model_info.model_uri, + data, + model_type="text", + evaluators="default", + extra_metrics=[mlflow.metrics.latency], + ) + + def test_evaluate_with_latency(): with mlflow.start_run() as run: model_info = mlflow.pyfunc.log_model( From 49415f37edc02da7c8e9ccee2b78e294fe5aa26e Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Tue, 24 Oct 2023 17:01:34 -0700 Subject: [PATCH 078/101] [bug-fix] Fixing target and prediction mapping for renamed keys (#10114) Signed-off-by: Sunish Sheth --- mlflow/models/evaluation/base.py | 9 +++ tests/evaluate/test_default_evaluator.py | 72 ++++++++++++++++++++++++ 2 files changed, 81 insertions(+) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 7f9a92d8c6d7f..7a2c93e902ad2 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1667,6 +1667,15 @@ def pred_sample(eval_df, _builtin_metrics, _artifacts_dir): from mlflow.pyfunc import PyFuncModel, _load_model_or_server, _ServedPyFuncModel from mlflow.utils import env_manager as _EnvManager + if evaluator_config is not None: + col_mapping = evaluator_config.get("col_mapping", {}) + + if isinstance(targets, str): + targets = col_mapping.get(targets, targets) + + if isinstance(predictions, str): + predictions = col_mapping.get(predictions, predictions) + if data is None: raise MlflowException( message="The data argument cannot be None.", error_code=INVALID_PARAMETER_VALUE diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index fe8e1570fb3de..d36fc221867c5 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3244,3 +3244,75 @@ def test_evaluate_with_numpy_array(): "outputs", "toxicity/v1/score", ] + + +def test_target_prediction_col_mapping(): + metric = mlflow.metrics.make_genai_metric( + name="correctness", + definition=( + "Correctness refers to how well the generated output matches " + "or aligns with the reference or ground truth text that is considered " + "accurate and appropriate for the given input. The ground truth serves as " + "a benchmark against which the provided output is compared to determine the " + "level of accuracy and fidelity." + ), + grading_prompt=( + "Correctness: If the answer correctly answer the question, below " + "are the details for different scores: " + "- Score 0: the answer is completely incorrect, doesn't mention anything about " + "the question or is completely contrary to the correct answer. 
" + "- Score 1: the answer provides some relevance to the question and answer " + "one aspect of the question correctly. " + "- Score 2: the answer mostly answer the question but is missing or hallucinating " + "on one critical aspect. " + "- Score 3: the answer correctly answer the question and not missing any " + "major aspect" + ), + examples=[], + version="v1", + model="openai:/gpt-4", + grading_context_columns=["renamed_ground_truth"], + parameters={"temperature": 0.0}, + aggregations=["mean", "variance", "p90"], + greater_is_better=True, + ) + + with mock.patch.object( + model_utils, + "score_model_on_payload", + return_value=properly_formatted_openai_response1, + ): + with mlflow.start_run(): + eval_df = pd.DataFrame( + { + "inputs": [ + "What is MLflow?", + "What is Spark?", + "What is Python?", + ], + "ground_truth": [ + "MLflow is an open-source platform", + "Apache Spark is an open-source, distributed computing system", + "Python is a high-level programming language", + ], + "prediction": [ + "MLflow is an open-source platform", + "Apache Spark is an open-source, distributed computing system", + "Python is a high-level programming language", + ], + } + ) + results = mlflow.evaluate( + data=eval_df, + evaluators="default", + targets="renamed_ground_truth", + predictions="prediction", + extra_metrics=[metric], + evaluator_config={"col_mapping": {"renamed_ground_truth": "ground_truth"}}, + ) + + assert results.metrics == { + "correctness/v1/mean": 3.0, + "correctness/v1/variance": 0.0, + "correctness/v1/p90": 3.0, + } From 2d78a49150b8386f01412c6752c87c2cd59e6451 Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Tue, 24 Oct 2023 17:13:11 -0700 Subject: [PATCH 079/101] Remove perplexity from default evaluator (#10086) Signed-off-by: chenmoneygithub --- docs/source/llms/llm-evaluate/index.rst | 7 ++--- docs/source/python_api/mlflow.metrics.rst | 4 +-- mlflow/metrics/__init__.py | 25 --------------- mlflow/metrics/metric_definitions.py | 19 ------------ mlflow/models/evaluation/base.py | 28 +++++------------ mlflow/models/evaluation/default_evaluator.py | 2 -- tests/evaluate/test_default_evaluator.py | 31 +++++-------------- tests/metrics/test_metric_definitions.py | 12 ------- 8 files changed, 20 insertions(+), 108 deletions(-) diff --git a/docs/source/llms/llm-evaluate/index.rst b/docs/source/llms/llm-evaluate/index.rst index 2d90df8399dcc..80fd49eb77d83 100644 --- a/docs/source/llms/llm-evaluate/index.rst +++ b/docs/source/llms/llm-evaluate/index.rst @@ -113,13 +113,12 @@ Select Metrics to Evaluate -------------------------- MLflow LLM evaluation includes default collections of metrics for pre-selected tasks, e.g, "question-answering". Depending on the -type of LLM use case that you are evaluating, these pre-defined collections can greatly simplify the process of running evaluations. +LLM use case that you are evaluating, these pre-defined collections can greatly simplify the process of running evaluations. 
The default metrics for given model types are shown below: * **question-answering**: ``model_type="question-answering"``: * exact-match - * `perplexity `_ :sup:`1` * `toxicity `_ :sup:`1` * `ari_grade_level `_ :sup:`2` * `flesch_kincaid_grade_level `_ :sup:`2` @@ -127,14 +126,12 @@ The default metrics for given model types are shown below: * **text-summarization**: ``model_type="text-summarization"``: * `ROUGE `_ :sup:`3` - * `perplexity `_ :sup:`1` * `toxicity `_ :sup:`1` * `ari_grade_level `_ :sup:`2` * `flesch_kincaid_grade_level `_ :sup:`2` * **text models**: ``model_type="text"``: - * `perplexity `_ :sup:`1` * `toxicity `_ :sup:`1` * `ari_grade_level `_ :sup:`2` * `flesch_kincaid_grade_level `_ :sup:`2` @@ -196,6 +193,8 @@ metrics: * :py:func:`mlflow.metrics.answer_similarity`: Evaluate the similarity between ground truth and your LLM outputs. * :py:func:`mlflow.metrics.answer_correctness`: Evaluate the correctness level of your LLM outputs based on given context and ground truth. +* :py:func:`mlflow.metrics.answer_relevance`: Evaluate the appropriateness and applicability of the output with + respect to the input. * :py:func:`mlflow.metrics.faithfulness`: Evaluate the faithfulness of your LLM outputs. diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 3fa246f1bb4c5..7517e239f466b 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -78,8 +78,6 @@ We provide the following builtin factory functions to create :py:class:`Evaluati .. autofunction:: mlflow.metrics.flesch_kincaid_grade_level -.. autofunction:: mlflow.metrics.perplexity - .. autofunction:: mlflow.metrics.rouge1 .. autofunction:: mlflow.metrics.rouge2 @@ -120,4 +118,4 @@ When using LLM based :py:class:`EvaluationMetric EvaluationMetric: ) -@experimental -def perplexity() -> EvaluationMetric: - """ - This function will create a metric for evaluating `perplexity`_ using the model gpt2. - - The score ranges from 0 to infinity, where a lower score means that the model is better at - predicting the given text and a higher score means that the model is not likely to predict the - text. - - Aggregations calculated for this metric: - - mean - - .. _perplexity: https://huggingface.co/spaces/evaluate-metric/perplexity - """ - return make_metric( - eval_fn=_perplexity_eval_fn, - greater_is_better=False, - name="perplexity", - long_name="perplexity/gpt2", - version="v1", - ) - - @experimental def flesch_kincaid_grade_level() -> EvaluationMetric: """ @@ -401,7 +377,6 @@ def f1_score() -> EvaluationMetric: "EvaluationMetric", "MetricValue", "make_metric", - "perplexity", "flesch_kincaid_grade_level", "ari_grade_level", "accuracy", diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 479f77004e1d5..079ebaba753f6 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -83,25 +83,6 @@ def _toxicity_eval_fn(predictions, targets=None, metrics=None): ) -def _perplexity_eval_fn(predictions, targets=None, metrics=None): - if not _validate_text_data(predictions, "perplexity", "predictions"): - return - - try: - perplexity = _cached_evaluate_load("perplexity", module_type="metric") - except Exception as e: - _logger.warning( - f"Failed to load 'perplexity' metric (error: {e!r}), skipping metric logging." 
- ) - return - - scores = perplexity.compute(predictions=predictions, model_id="gpt2")["perplexities"] - return MetricValue( - scores=scores, - aggregate_results=standard_aggregations(scores), - ) - - def _flesch_kincaid_eval_fn(predictions, targets=None, metrics=None): if not _validate_text_data(predictions, "flesch_kincaid", "predictions"): return diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 7a2c93e902ad2..be33db92c1017 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1212,15 +1212,11 @@ def evaluate( precision_recall_auc), precision-recall merged curves plot, ROC merged curves plot. - For question-answering models, the default evaluator logs: - - **metrics**: ``exact_match``, ``token_count``, `mean_perplexity`_ (requires `evaluate`_, - `pytorch`_, `transformers`_), `toxicity_ratio`_ (requires `evaluate`_, `pytorch`_, - `mean_flesch_kincaid_grade_level`_ (requires `textstat`_). + - **metrics**: ``exact_match``, ``token_count``, `toxicity_ratio`_ (requires `evaluate`_, + `pytorch`_, `mean_flesch_kincaid_grade_level`_ (requires `textstat`_). - **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets`` argument is supplied), and per-row metrics of the model in tabular format. - .. _mean_perplexity: - https://huggingface.co/spaces/evaluate-metric/perplexity - .. _toxicity_ratio: https://huggingface.co/spaces/evaluate-measurement/toxicity @@ -1244,19 +1240,15 @@ def evaluate( - For text-summarization models, the default evaluator logs: - **metrics**: ``token_count``, `ROUGE`_ (requires `evaluate`_, `nltk`_, and - `rouge_score`_ to be installed), `mean_perplexity`_ (requires `evaluate`_, `pytorch`_, - `transformers`_), `toxicity_ratio`_ (requires `evaluate`_, `pytorch`_, `transformers`_), - `mean_ari_grade_level`_ (requires `textstat`_), `mean_flesch_kincaid_grade_level`_ - (requires `textstat`_). + `rouge_score`_ to be installed), `toxicity_ratio`_ (requires `evaluate`_, `pytorch`_, + `transformers`_), `mean_ari_grade_level`_ (requires `textstat`_), + `mean_flesch_kincaid_grade_level`_ (requires `textstat`_). - **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets`` argument is supplied), and per-row metrics of the model in the tabular format. .. _ROUGE: https://huggingface.co/spaces/evaluate-metric/rouge - .. _mean_perplexity: - https://huggingface.co/spaces/evaluate-metric/perplexity - .. _toxicity_ratio: https://huggingface.co/spaces/evaluate-measurement/toxicity @@ -1285,19 +1277,15 @@ def evaluate( https://pypi.org/project/textstat - For text models, the default evaluator logs: - - **metrics**: ``token_count``, `mean_perplexity`_ (requires `evaluate`_, `pytorch`_, - `transformers`_), `toxicity_ratio`_ (requires `evaluate`_, `pytorch`_, `transformers`_), - `mean_ari_grade_level`_ (requires `textstat`_), `mean_flesch_kincaid_grade_level`_ - (requires `textstat`_). + - **metrics**: ``token_count``, `toxicity_ratio`_ (requires `evaluate`_, `pytorch`_, + `transformers`_), `mean_ari_grade_level`_ (requires `textstat`_), + `mean_flesch_kincaid_grade_level`_ (requires `textstat`_). - **artifacts**: A JSON file containing the inputs, outputs, targets (if the ``targets`` argument is supplied), and per-row metrics of the model in tabular format. .. _evaluate: https://pypi.org/project/evaluate - .. _mean_perplexity: - https://huggingface.co/spaces/evaluate-metric/perplexity - .. 
_toxicity_ratio: https://huggingface.co/spaces/evaluate-measurement/toxicity diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index f00b4efa76d02..cf168fb8152c1 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -30,7 +30,6 @@ ari_grade_level, exact_match, flesch_kincaid_grade_level, - perplexity, rouge1, rouge2, rougeL, @@ -1633,7 +1632,6 @@ def _evaluate( text_metrics = [ token_count(), toxicity(), - perplexity(), flesch_kincaid_grade_level(), ari_grade_level(), ] diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index d36fc221867c5..a961b8f435cc2 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -2151,7 +2151,6 @@ def validate_question_answering_logged_data( "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", - "perplexity/v1/score", "token_count", } if with_targets: @@ -2163,7 +2162,6 @@ def validate_question_answering_logged_data( assert logged_data[predictions_name].tolist() == ["words random", "This is a sentence."] assert logged_data["toxicity/v1/score"][0] < 0.5 assert logged_data["toxicity/v1/score"][1] < 0.5 - assert logged_data["perplexity/v1/score"][0] > logged_data["perplexity/v1/score"][1] assert all( isinstance(grade, float) for grade in logged_data["flesch_kincaid_grade_level/v1/score"] ) @@ -2307,13 +2305,10 @@ def test_evaluate_question_answering_on_static_dataset_with_targets(): validate_question_answering_logged_data(logged_data, predictions_name="pred") assert set(results.metrics.keys()) == { "toxicity/v1/variance", - "perplexity/v1/p90", - "perplexity/v1/variance", "toxicity/v1/ratio", "toxicity/v1/mean", "flesch_kincaid_grade_level/v1/variance", "ari_grade_level/v1/p90", - "perplexity/v1/mean", "flesch_kincaid_grade_level/v1/p90", "flesch_kincaid_grade_level/v1/mean", "ari_grade_level/v1/mean", @@ -2382,7 +2377,6 @@ def validate_text_summarization_logged_data(logged_data, with_targets=True): "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", - "perplexity/v1/score", "token_count", } if with_targets: @@ -2417,7 +2411,7 @@ def validate_text_summarization_logged_data(logged_data, with_targets=True): def get_text_metrics_keys(): - metric_names = ["perplexity", "toxicity", "flesch_kincaid_grade_level", "ari_grade_level"] + metric_names = ["toxicity", "flesch_kincaid_grade_level", "ari_grade_level"] standard_aggregations = ["mean", "variance", "p90"] version = "v1" @@ -2542,7 +2536,6 @@ def test_evaluate_text_summarization_fails_to_load_evaluate_metrics(): model_type="text-summarization", ) mock_load.assert_any_call("rouge") - mock_load.assert_any_call("perplexity", module_type="metric") mock_load.assert_any_call("toxicity", module_type="measurement") client = mlflow.MlflowClient() @@ -2584,7 +2577,6 @@ def test_evaluate_text_and_text_metrics(): "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", - "perplexity/v1/score", "token_count", } assert logged_data["text"].tolist() == ["sentence not", "All women are bad."] @@ -2592,8 +2584,6 @@ def test_evaluate_text_and_text_metrics(): # Hateful sentiments should be marked as toxic assert logged_data["toxicity/v1/score"][0] < 0.5 assert logged_data["toxicity/v1/score"][1] > 0.5 - # The perplexity of random words should be higher than a valid sentence. 
- assert logged_data["perplexity/v1/score"][0] > logged_data["perplexity/v1/score"][1] # Simple sentences should have a low grade level. assert logged_data["flesch_kincaid_grade_level/v1/score"][1] < 4 assert logged_data["ari_grade_level/v1/score"][1] < 4 @@ -2677,7 +2667,6 @@ def test_eval_results_table_json_can_be_prefixed_with_metric_prefix(metric_prefi f"{metric_prefix}toxicity/v1/score", f"{metric_prefix}flesch_kincaid_grade_level/v1/score", f"{metric_prefix}ari_grade_level/v1/score", - f"{metric_prefix}perplexity/v1/score", f"{metric_prefix}token_count", } @@ -2827,7 +2816,6 @@ def test_eval_df(predictions, targets, metrics, inputs, truth, context): "context", "token_count", "toxicity/v1/score", - "perplexity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", ] @@ -2858,18 +2846,19 @@ def test_evaluate_no_model_type_with_builtin_metric(): results = mlflow.evaluate( model_info.model_uri, data, - extra_metrics=[mlflow.metrics.perplexity()], + extra_metrics=[mlflow.metrics.toxicity()], ) assert results.metrics.keys() == { - "perplexity/v1/mean", - "perplexity/v1/variance", - "perplexity/v1/p90", + "toxicity/v1/mean", + "toxicity/v1/variance", + "toxicity/v1/p90", + "toxicity/v1/ratio", } assert len(results.tables) == 1 assert results.tables["eval_results_table"].columns.tolist() == [ "text", "outputs", - "perplexity/v1/score", + "toxicity/v1/score", ] @@ -2963,7 +2952,6 @@ def test_default_metrics_as_custom_metrics_static_dataset(): model_type="question-answering", custom_metrics=[ mlflow.metrics.flesch_kincaid_grade_level(), - mlflow.metrics.perplexity(), mlflow.metrics.ari_grade_level(), mlflow.metrics.toxicity(), mlflow.metrics.exact_match(), @@ -2974,7 +2962,7 @@ def test_default_metrics_as_custom_metrics_static_dataset(): client = mlflow.MlflowClient() artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] assert "eval_results_table.json" in artifacts - for metric in ["toxicity", "perplexity", "ari_grade_level", "flesch_kincaid_grade_level"]: + for metric in ["toxicity", "ari_grade_level", "flesch_kincaid_grade_level"]: for measure in ["mean", "p90", "variance"]: assert f"{metric}/v1/{measure}" in results.metrics.keys() assert "exact_match/v1" in results.metrics.keys() @@ -3002,7 +2990,6 @@ def test_multi_output_model_error_handling(): model_type="question-answering", custom_metrics=[ mlflow.metrics.flesch_kincaid_grade_level(), - mlflow.metrics.perplexity(), mlflow.metrics.ari_grade_level(), mlflow.metrics.toxicity(), mlflow.metrics.exact_match(), @@ -3055,7 +3042,6 @@ def test_evaluate_with_latency(): "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", - "perplexity/v1/score", "latency", "token_count", } @@ -3091,7 +3077,6 @@ def test_evaluate_with_latency_static_dataset(): "toxicity/v1/score", "flesch_kincaid_grade_level/v1/score", "ari_grade_level/v1/score", - "perplexity/v1/score", "latency", "token_count", } diff --git a/tests/metrics/test_metric_definitions.py b/tests/metrics/test_metric_definitions.py index 5b63b5299f8da..31bfe8232d789 100644 --- a/tests/metrics/test_metric_definitions.py +++ b/tests/metrics/test_metric_definitions.py @@ -13,7 +13,6 @@ mape, max_error, mse, - perplexity, precision_score, r2_score, recall_score, @@ -32,7 +31,6 @@ ari_grade_level(), exact_match(), flesch_kincaid_grade_level(), - perplexity(), rouge1(), rouge2(), rougeL(), @@ -70,16 +68,6 @@ def test_toxicity(): assert "variance" in result.aggregate_results -def test_perplexity(): - predictions = pd.Series(["sentence 
not", "This is a sentence"]) - result = perplexity().eval_fn(predictions, None, {}) - # A properly structured sentence should have lower perplexity - assert result.scores[0] > result.scores[1] - assert result.aggregate_results["mean"] == (result.scores[0] + result.scores[1]) / 2 - assert result.scores[0] > result.aggregate_results["p90"] > result.scores[1] - assert "variance" in result.aggregate_results - - def test_flesch_kincaid_grade_level(): predictions = pd.Series( [ From 72e23a74385861ab8b92055445f161d83ec408f9 Mon Sep 17 00:00:00 2001 From: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com> Date: Wed, 25 Oct 2023 11:12:59 +0900 Subject: [PATCH 080/101] Set fail-fast:false for splitted CI tests (#10116) Signed-off-by: B-Step62 --- .github/workflows/master.yml | 4 ++++ .github/workflows/recipe.yml | 1 + 2 files changed, 5 insertions(+) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index ff21ed15ec764..953ea826d32e4 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -89,6 +89,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 strategy: + fail-fast: false matrix: group: [1, 2] include: @@ -227,6 +228,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 strategy: + fail-fast: false matrix: group: [1, 2] include: @@ -279,6 +281,7 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 120 strategy: + fail-fast: false matrix: group: [1, 2] include: @@ -329,6 +332,7 @@ jobs: runs-on: windows-latest timeout-minutes: 120 strategy: + fail-fast: false matrix: group: [1, 2] include: diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index 7e8b8618a4140..2ed6b04ead32a 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -53,6 +53,7 @@ jobs: if: github.event_name != 'pull_request' || github.event.pull_request.draft == false runs-on: windows-latest strategy: + fail-fast: false matrix: group: [1, 2] include: From f56f4080eac22a13b5a4a34722f705f404e27499 Mon Sep 17 00:00:00 2001 From: Daniel Lok Date: Wed, 25 Oct 2023 11:53:20 +0800 Subject: [PATCH 081/101] Set OpenAI API key on _OAITokenHolder init (#10095) Signed-off-by: Daniel Lok --- mlflow/openai/utils.py | 8 +++++++- tests/openai/test_openai_init.py | 26 ++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 tests/openai/test_openai_init.py diff --git a/mlflow/openai/utils.py b/mlflow/openai/utils.py index fc4c2441b6b4d..fdaef6d444085 100644 --- a/mlflow/openai/utils.py +++ b/mlflow/openai/utils.py @@ -158,7 +158,13 @@ def __init__(self, api_type): self._api_token = None self._credential = None self._is_azure_ad = api_type in ("azure_ad", "azuread") - self._key_configured = bool(openai.api_key) or "OPENAI_API_KEY" in os.environ + self._key_configured = bool(openai.api_key) + + # set the api key if it's not set. 
this is to deal with cases where the + # user sets the environment variable after importing the `openai` module + if not bool(openai.api_key) and "OPENAI_API_KEY" in os.environ: + openai.api_key = os.environ["OPENAI_API_KEY"] + self._key_configured = True if self._is_azure_ad and not self._key_configured: try: diff --git a/tests/openai/test_openai_init.py b/tests/openai/test_openai_init.py new file mode 100644 index 0000000000000..5c268f11ae2e0 --- /dev/null +++ b/tests/openai/test_openai_init.py @@ -0,0 +1,26 @@ +import os +from importlib import reload + +from mlflow.openai import _OAITokenHolder + + +def test_set_api_key_on_tokenholder_init(monkeypatch): + # if the user sets the API key after the openai module, + # expect `openai.api_key` to not be set. + monkeypatch.delenv("OPENAI_API_KEY", False) + assert "OPENAI_API_KEY" not in os.environ + + import openai + + monkeypatch.setenv("OPENAI_API_KEY", "test-key") + assert openai.api_key is None + + # when OAITokenHolder is initialized, expect it to set `openai.api_key` + token_holder = _OAITokenHolder("open_ai") + assert openai.api_key == "test-key" + assert token_holder._key_configured + + # reload the module to simulate the env var being set before + # load. in this case we'd expect the API key to be present + reload(openai) + assert openai.api_key == "test-key" From ceb6e51f1eeb9a7b5f93e1524fcf345b1cfb0378 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Wed, 25 Oct 2023 14:58:24 +0900 Subject: [PATCH 082/101] Rename `_insecure_md5` to `md5` (#10119) Signed-off-by: harupy --- mlflow/data/digest_utils.py | 4 ++-- mlflow/models/evaluation/base.py | 4 ++-- mlflow/store/tracking/file_store.py | 6 +++--- mlflow/utils/__init__.py | 20 -------------------- mlflow/utils/insecure_hash.py | 14 ++++++++++++++ tests/evaluate/test_evaluation.py | 4 ++-- tests/resources/data/dataset.py | 4 ++-- tests/store/tracking/test_file_store.py | 4 ++-- 8 files changed, 27 insertions(+), 33 deletions(-) create mode 100644 mlflow/utils/insecure_hash.py diff --git a/mlflow/data/digest_utils.py b/mlflow/data/digest_utils.py index 8f9fdf2fbfcab..bbaa02dc20041 100644 --- a/mlflow/data/digest_utils.py +++ b/mlflow/data/digest_utils.py @@ -4,7 +4,7 @@ from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE -from mlflow.utils import _insecure_md5 +from mlflow.utils import insecure_hash MAX_ROWS = 10000 @@ -159,7 +159,7 @@ def get_normalized_md5_digest(elements: List[Any]) -> str: INVALID_PARAMETER_VALUE, ) - md5 = _insecure_md5() + md5 = insecure_hash.md5() for element in elements: md5.update(element) diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index be33db92c1017..7bb603ed039ed 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -30,7 +30,7 @@ from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE from mlflow.tracking.artifact_utils import _download_artifact_from_uri from mlflow.tracking.client import MlflowClient -from mlflow.utils import _get_fully_qualified_class_name, _insecure_md5 +from mlflow.utils import _get_fully_qualified_class_name, insecure_hash from mlflow.utils.annotations import developer_stable, experimental from mlflow.utils.class_utils import _get_class_from_string from mlflow.utils.file_utils import TempDir @@ -605,7 +605,7 @@ def __init__( ) # generate dataset hash - md5_gen = _insecure_md5() + md5_gen = insecure_hash.md5() _gen_md5_for_arraylike_obj(md5_gen, self._features_data) if self._labels_data is not 
None: _gen_md5_for_arraylike_obj(md5_gen, self._labels_data) diff --git a/mlflow/store/tracking/file_store.py b/mlflow/store/tracking/file_store.py index afcfecc2d8423..a04f557879f1d 100644 --- a/mlflow/store/tracking/file_store.py +++ b/mlflow/store/tracking/file_store.py @@ -45,7 +45,7 @@ SEARCH_MAX_RESULTS_THRESHOLD, ) from mlflow.store.tracking.abstract_store import AbstractStore -from mlflow.utils import _insecure_md5, get_results_from_paginated_fn +from mlflow.utils import get_results_from_paginated_fn, insecure_hash from mlflow.utils.file_utils import ( append_to, exists, @@ -1127,13 +1127,13 @@ def log_inputs(self, run_id: str, datasets: Optional[List[DatasetInput]] = None) @staticmethod def _get_dataset_id(dataset_name: str, dataset_digest: str) -> str: - md5 = _insecure_md5(dataset_name.encode("utf-8")) + md5 = insecure_hash.md5(dataset_name.encode("utf-8")) md5.update(dataset_digest.encode("utf-8")) return md5.hexdigest() @staticmethod def _get_input_id(dataset_id: str, run_id: str) -> str: - md5 = _insecure_md5(dataset_id.encode("utf-8")) + md5 = insecure_hash.md5(dataset_id.encode("utf-8")) md5.update(run_id.encode("utf-8")) return md5.hexdigest() diff --git a/mlflow/utils/__init__.py b/mlflow/utils/__init__.py index a81e70bd29b5f..fd2563cefe3be 100644 --- a/mlflow/utils/__init__.py +++ b/mlflow/utils/__init__.py @@ -1,10 +1,8 @@ import base64 -import hashlib import inspect import logging import socket import subprocess -import sys import uuid from contextlib import closing from itertools import islice @@ -270,21 +268,3 @@ def get_results_from_paginated_fn(paginated_fn, max_results_per_page, max_result else: break return all_results - - -def _insecure_md5(string=b""): - """ - Do not use this function for security purposes (e.g., password hashing). - - In Python >= 3.9, `hashlib.md5` fails in FIPS-compliant environments. This function - provides a workaround for this issue by using `hashlib.md5` with `usedforsecurity=False`. - - References: - - https://github.com/mlflow/mlflow/issues/9905 - - https://docs.python.org/3/library/hashlib.html - """ - return ( - hashlib.md5(string, usedforsecurity=False) - if sys.version_info >= (3, 9) - else hashlib.md5(string) - ) diff --git a/mlflow/utils/insecure_hash.py b/mlflow/utils/insecure_hash.py new file mode 100644 index 0000000000000..b8ae6c0e7b46e --- /dev/null +++ b/mlflow/utils/insecure_hash.py @@ -0,0 +1,14 @@ +import functools +import hashlib +import sys + +# DO NOT use this function for security purposes (e.g., password hashing). +# +# In Python >= 3.9, insecure hashing algorithms such as MD5 fail in FIPS-compliant +# environments unless `usedforsecurity=False` is explicitly passed. 
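# Illustrative usage of this helper, mirroring the call sites updated elsewhere in
# this patch (a sketch only; the names below are taken from the surrounding diffs):
#
#     from mlflow.utils import insecure_hash
#
#     digest = insecure_hash.md5("dataset-name".encode("utf-8"))
#     digest.update("dataset-digest".encode("utf-8"))
#     print(digest.hexdigest())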
+# +# References: +# - https://github.com/mlflow/mlflow/issues/9905 +# - https://docs.python.org/3/library/hashlib.html +_kwargs = {"usedforsecurity": False} if sys.version_info >= (3, 9) else {} +md5 = functools.partial(hashlib.md5, **_kwargs) diff --git a/tests/evaluate/test_evaluation.py b/tests/evaluate/test_evaluation.py index 5b2c7a1fc20d4..01d4d603bef97 100644 --- a/tests/evaluate/test_evaluation.py +++ b/tests/evaluate/test_evaluation.py @@ -55,7 +55,7 @@ from mlflow.pyfunc import _ServedPyFuncModel from mlflow.pyfunc.scoring_server.client import ScoringServerClient from mlflow.tracking.artifact_utils import get_artifact_uri -from mlflow.utils import _insecure_md5 +from mlflow.utils import insecure_hash from mlflow.utils.file_utils import TempDir @@ -690,7 +690,7 @@ def test_dataset_metadata(): def test_gen_md5_for_arraylike_obj(): def get_md5(data): - md5_gen = _insecure_md5() + md5_gen = insecure_hash.md5() _gen_md5_for_arraylike_obj(md5_gen, data) return md5_gen.hexdigest() diff --git a/tests/resources/data/dataset.py b/tests/resources/data/dataset.py index b09e2d3d5df3a..486e9809d5fdd 100644 --- a/tests/resources/data/dataset.py +++ b/tests/resources/data/dataset.py @@ -8,7 +8,7 @@ from mlflow.data.dataset import Dataset from mlflow.types import Schema from mlflow.types.utils import _infer_schema -from mlflow.utils import _insecure_md5 +from mlflow.utils import insecure_hash from tests.resources.data.dataset_source import TestDatasetSource @@ -29,7 +29,7 @@ def _compute_digest(self) -> str: Computes a digest for the dataset. Called if the user doesn't supply a digest when constructing the dataset. """ - hash_md5 = _insecure_md5() + hash_md5 = insecure_hash.md5() for hash_part in pd.util.hash_array(np.array(self._data_list)): hash_md5.update(hash_part) return base64.b64encode(hash_md5.digest()).decode("ascii") diff --git a/tests/store/tracking/test_file_store.py b/tests/store/tracking/test_file_store.py index 52655419c930d..98407a580d20e 100644 --- a/tests/store/tracking/test_file_store.py +++ b/tests/store/tracking/test_file_store.py @@ -37,7 +37,7 @@ from mlflow.store.entities.paged_list import PagedList from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT from mlflow.store.tracking.file_store import FileStore -from mlflow.utils import _insecure_md5 +from mlflow.utils import insecure_hash from mlflow.utils.file_utils import TempDir, path_to_local_file_uri, read_yaml, write_yaml from mlflow.utils.mlflow_tags import MLFLOW_DATASET_CONTEXT, MLFLOW_LOGGED_MODELS, MLFLOW_RUN_NAME from mlflow.utils.name_utils import _EXPERIMENT_ID_FIXED_WIDTH, _GENERATOR_PREDICATES @@ -2493,7 +2493,7 @@ def assert_expected_input_storage_ids_present(run, dataset_storage_ids): inputs_dir = os.path.join(run_dir, FileStore.INPUTS_FOLDER_NAME) expected_input_storage_ids = [] for dataset_storage_id in dataset_storage_ids: - md5 = _insecure_md5(dataset_storage_id.encode("utf-8")) + md5 = insecure_hash.md5(dataset_storage_id.encode("utf-8")) md5.update(run.info.run_id.encode("utf-8")) expected_input_storage_ids.append(md5.hexdigest()) assert set(os.listdir(inputs_dir)) == set(expected_input_storage_ids) From 5449b23729e84469706dbc4f9a33784bababb635 Mon Sep 17 00:00:00 2001 From: Prithvi Kannan <46332835+prithvikannan@users.noreply.github.com> Date: Tue, 24 Oct 2023 23:05:23 -0700 Subject: [PATCH 083/101] Add stacktrace if model scoring fails (#10115) Signed-off-by: Prithvi Kannan --- mlflow/metrics/genai/genai_metric.py | 7 ++-- tests/metrics/genai/test_genai_metrics.py | 49 ++++++++++++++++++----- 
2 files changed, 41 insertions(+), 15 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index baa23028a24b4..da635f14d2f14 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -72,10 +72,10 @@ def _extract_score_and_justification(output): justification = match.group(2) else: score = None - justification = None + justification = f"Failed to extract score and justification. Raw output: {output}" if not isinstance(score, (int, float)) or not isinstance(justification, str): - return None, None + return None, f"Failed to extract score and justification. Raw output: {output}" return score, justification @@ -282,8 +282,7 @@ def score_model_on_one_payload( ErrorCode.Name(UNAUTHENTICATED), ]: raise MlflowException(e) - _logger.info(f"Failed to score model on payload. Error: {e!r}") - return None, None + return None, f"Failed to score model on payload. Error: {e!s}" scores = [None] * len(inputs) justifications = [None] * len(inputs) diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index b4800327cd59c..88153dd778f47 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -297,7 +297,31 @@ def test_make_genai_metric_incorrect_response(): ) assert metric_value.scores == [None] - assert metric_value.justifications == [None] + assert metric_value.justifications == [ + f"Failed to extract score and justification. Raw output:" + f" {incorrectly_formatted_openai_response}" + ] + + assert np.isnan(metric_value.aggregate_results["mean"]) + assert np.isnan(metric_value.aggregate_results["variance"]) + assert metric_value.aggregate_results["p90"] is None + + with mock.patch.object( + model_utils, + "score_model_on_payload", + side_effect=Exception("Some error occurred"), + ): + metric_value = custom_metric.eval_fn( + pd.Series([mlflow_prediction]), + {}, + pd.Series(["What is MLflow?"]), + pd.Series([mlflow_ground_truth]), + ) + + assert metric_value.scores == [None] + assert metric_value.justifications == [ + "Failed to score model on payload. Error: Some error occurred" + ] assert np.isnan(metric_value.aggregate_results["mean"]) assert np.isnan(metric_value.aggregate_results["variance"]) @@ -508,18 +532,21 @@ def test_extract_score_and_justification(): assert score4 == 4 assert justification4 == "This is a justification" - score5, justification5 = _extract_score_and_justification( - output={ - "candidates": [ - { - "text": '{"score": 4, "justification": {"foo": "bar"}}', - } - ] - } - ) + malformed_output = { + "candidates": [ + { + "text": '{"score": 4, "justification": {"foo": "bar"}}', + } + ] + } + + score5, justification5 = _extract_score_and_justification(output=malformed_output) assert score5 is None - assert justification5 is None + assert ( + justification5 + == f"Failed to extract score and justification. 
Raw output: {malformed_output}" + ) def test_correctness_metric(): From d132806a51a0ef302714e0fbd4588a7d1944f8a9 Mon Sep 17 00:00:00 2001 From: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com> Date: Wed, 25 Oct 2023 17:08:46 +0900 Subject: [PATCH 084/101] Address sha1 security compliance (#10121) Signed-off-by: B-Step62 --- mlflow/pyfunc/__init__.py | 4 ++-- mlflow/utils/conda.py | 7 +++---- mlflow/utils/environment.py | 5 ++--- mlflow/utils/insecure_hash.py | 1 + pyproject.toml | 1 + tests/projects/test_projects_cli.py | 7 ++++--- 6 files changed, 13 insertions(+), 12 deletions(-) diff --git a/mlflow/pyfunc/__init__.py b/mlflow/pyfunc/__init__.py index 7462cb13ace7f..bf77a350f5a91 100644 --- a/mlflow/pyfunc/__init__.py +++ b/mlflow/pyfunc/__init__.py @@ -209,7 +209,6 @@ import collections import functools -import hashlib import importlib import inspect import logging @@ -269,6 +268,7 @@ check_port_connectivity, find_free_port, get_major_minor_py_version, + insecure_hash, ) from mlflow.utils import env_manager as _EnvManager from mlflow.utils.annotations import deprecated, experimental @@ -1583,7 +1583,7 @@ def batch_predict_fn(pdf, params=None): model_path = os.path.join( tempfile.gettempdir(), "mlflow", - hashlib.sha1(model_uri.encode()).hexdigest(), + insecure_hash.sha1(model_uri.encode()).hexdigest(), ) try: loaded_model = mlflow.pyfunc.load_model(model_path) diff --git a/mlflow/utils/conda.py b/mlflow/utils/conda.py index 522605acd1736..b22681a2ad56e 100644 --- a/mlflow/utils/conda.py +++ b/mlflow/utils/conda.py @@ -1,4 +1,3 @@ -import hashlib import json import logging import os @@ -7,7 +6,7 @@ from mlflow.environment_variables import MLFLOW_CONDA_CREATE_ENV_CMD, MLFLOW_CONDA_HOME from mlflow.exceptions import ExecutionException -from mlflow.utils import process +from mlflow.utils import insecure_hash, process from mlflow.utils.environment import Environment _logger = logging.getLogger(__name__) @@ -61,12 +60,12 @@ def _get_conda_env_name(conda_env_path, env_id=None, env_root_dir=None): if env_id: conda_env_contents += env_id - env_name = "mlflow-%s" % hashlib.sha1(conda_env_contents.encode("utf-8")).hexdigest() + env_name = "mlflow-%s" % insecure_hash.sha1(conda_env_contents.encode("utf-8")).hexdigest() if env_root_dir: env_root_dir = os.path.normpath(env_root_dir) # Generate env name with format "mlflow-{conda_env_contents_hash}-{env_root_dir_hash}" # hashing `conda_env_contents` and `env_root_dir` separately helps debugging - env_name += "-%s" % hashlib.sha1(env_root_dir.encode("utf-8")).hexdigest() + env_name += "-%s" % insecure_hash.sha1(env_root_dir.encode("utf-8")).hexdigest() return env_name diff --git a/mlflow/utils/environment.py b/mlflow/utils/environment.py index d530add4ae2a6..0eb052cdf7d79 100644 --- a/mlflow/utils/environment.py +++ b/mlflow/utils/environment.py @@ -1,4 +1,3 @@ -import hashlib import logging import os import re @@ -10,7 +9,7 @@ from mlflow.exceptions import MlflowException from mlflow.protos.databricks_pb2 import INVALID_PARAMETER_VALUE -from mlflow.utils import PYTHON_VERSION +from mlflow.utils import PYTHON_VERSION, insecure_hash from mlflow.utils.process import _exec_cmd from mlflow.utils.requirements_utils import ( _infer_requirements, @@ -557,7 +556,7 @@ def _get_mlflow_env_name(s): :returns: String in the form of "mlflow-{hash}" (e.g. 
"mlflow-da39a3ee5e6b4b0d3255bfef95601890afd80709") """ - return "mlflow-" + hashlib.sha1(s.encode("utf-8")).hexdigest() + return "mlflow-" + insecure_hash.sha1(s.encode("utf-8")).hexdigest() def _get_pip_install_mlflow(): diff --git a/mlflow/utils/insecure_hash.py b/mlflow/utils/insecure_hash.py index b8ae6c0e7b46e..7807a6d2e4ff3 100644 --- a/mlflow/utils/insecure_hash.py +++ b/mlflow/utils/insecure_hash.py @@ -12,3 +12,4 @@ # - https://docs.python.org/3/library/hashlib.html _kwargs = {"usedforsecurity": False} if sys.version_info >= (3, 9) else {} md5 = functools.partial(hashlib.md5, **_kwargs) +sha1 = functools.partial(hashlib.sha1, **_kwargs) diff --git a/pyproject.toml b/pyproject.toml index 8feaec189b042..3a6cb66174842 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -40,6 +40,7 @@ select = [ "RUF010", "RUF013", "S307", + "S324", "UP004", "UP008", "UP011", diff --git a/tests/projects/test_projects_cli.py b/tests/projects/test_projects_cli.py index dd7f99876304d..f123a7f3fd63f 100644 --- a/tests/projects/test_projects_cli.py +++ b/tests/projects/test_projects_cli.py @@ -1,4 +1,3 @@ -import hashlib import json import logging import os @@ -9,7 +8,7 @@ from click.testing import CliRunner from mlflow import MlflowClient, cli -from mlflow.utils import process +from mlflow.utils import insecure_hash, process from tests.integration.utils import invoke_cli_runner from tests.projects.utils import ( @@ -90,7 +89,9 @@ def clean_mlruns_dir(): def test_run_local_conda_env(): with open(os.path.join(TEST_PROJECT_DIR, "conda.yaml")) as handle: conda_env_contents = handle.read() - expected_env_name = "mlflow-%s" % hashlib.sha1(conda_env_contents.encode("utf-8")).hexdigest() + expected_env_name = ( + "mlflow-%s" % insecure_hash.sha1(conda_env_contents.encode("utf-8")).hexdigest() + ) try: process._exec_cmd(cmd=["conda", "env", "remove", "--name", expected_env_name]) except process.ShellCommandException: From a7681d54ae702aa79ba5c4b92df270767c43d6f8 Mon Sep 17 00:00:00 2001 From: Bryan Qiu <55931436+bbqiu@users.noreply.github.com> Date: Wed, 25 Oct 2023 01:37:50 -0700 Subject: [PATCH 085/101] remove inline-block highlighted class (#10123) Signed-off-by: Bryan Qiu --- docs/theme/mlflow/static/css/theme.css | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/theme/mlflow/static/css/theme.css b/docs/theme/mlflow/static/css/theme.css index f5eb1d889a226..2516c81845aca 100644 --- a/docs/theme/mlflow/static/css/theme.css +++ b/docs/theme/mlflow/static/css/theme.css @@ -5118,7 +5118,6 @@ footer span.commit .rst-content tt, } .rst-content .highlighted { background: #F1C40F; - display: inline-block; font-weight: bold; padding: 0 6px } From ea0f7cd6a19a7026da4194fcfba5fcc04a59c610 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Wed, 25 Oct 2023 22:41:51 +0900 Subject: [PATCH 086/101] Unpin pyspark (#10126) Signed-off-by: harupy --- .github/workflows/master.yml | 5 +---- .github/workflows/recipe.yml | 7 ++----- mlflow/utils/_spark_utils.py | 2 +- tests/data/test_delta_dataset_source.py | 2 +- tests/data/test_pandas_dataset.py | 2 +- tests/data/test_spark_dataset.py | 2 +- tests/data/test_spark_dataset_source.py | 2 +- tests/recipes/test_ingest_step.py | 2 +- tests/recipes/test_predict_step.py | 2 +- 9 files changed, 10 insertions(+), 16 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 953ea826d32e4..2dd5fe3269726 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -109,8 +109,6 @@ jobs: python -m venv .venv source .venv/bin/activate 
source ./dev/install-common-deps.sh --ml - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' - uses: ./.github/actions/pipdeptree - name: Import check run: | @@ -353,8 +351,7 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e .[extras] - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark pip install mleap # Install Hugging Face datasets to test Hugging Face usage with MLflow dataset tracking pip install datasets diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index 2ed6b04ead32a..a3565e42e72f3 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -42,9 +42,7 @@ jobs: - name: Install dependencies run: | source ./dev/install-common-deps.sh - pip install -e . - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark - name: Run tests run: | pytest tests/recipes @@ -70,8 +68,7 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e . - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark # TODO: Importing datasets in a pandas UDF (created by mlflow.pyfunc.spark_udf) crashes # the Python worker. To avoid this, uninstall `datasets`. This is a temporary workaround. pip uninstall -y datasets diff --git a/mlflow/utils/_spark_utils.py b/mlflow/utils/_spark_utils.py index 239ce113f3763..80a2b417deedc 100644 --- a/mlflow/utils/_spark_utils.py +++ b/mlflow/utils/_spark_utils.py @@ -46,7 +46,7 @@ def _create_local_spark_session_for_recipes(): _prepare_subprocess_environ_for_creating_local_spark_session() return ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_delta_dataset_source.py b/tests/data/test_delta_dataset_source.py index e9d8e4f9f766e..b199e558b1d74 100644 --- a/tests/data/test_delta_dataset_source.py +++ b/tests/data/test_delta_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_pandas_dataset.py b/tests/data/test_pandas_dataset.py index 2a452593e3e26..c8244eabbb2a7 100644 --- a/tests/data/test_pandas_dataset.py +++ b/tests/data/test_pandas_dataset.py @@ -24,7 +24,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset.py b/tests/data/test_spark_dataset.py index ae2fc350587b4..2283f4bf30ca9 100644 --- a/tests/data/test_spark_dataset.py +++ b/tests/data/test_spark_dataset.py @@ 
-21,7 +21,7 @@ def spark_session(tmp_path): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset_source.py b/tests/data/test_spark_dataset_source.py index 7b68370f2696f..78c0e0ccfee9b 100644 --- a/tests/data/test_spark_dataset_source.py +++ b/tests/data/test_spark_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_ingest_step.py b/tests/recipes/test_ingest_step.py index a5799d8f74131..4da8f2f9f5f4d 100644 --- a/tests/recipes/test_ingest_step.py +++ b/tests/recipes/test_ingest_step.py @@ -35,7 +35,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_predict_step.py b/tests/recipes/test_predict_step.py index 60bb8d3846879..987bfabfc5298 100644 --- a/tests/recipes/test_predict_step.py +++ b/tests/recipes/test_predict_step.py @@ -28,7 +28,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" From c468b1969aac78d3882153f5a4780d608bb12d2a Mon Sep 17 00:00:00 2001 From: Liang Zhang Date: Wed, 25 Oct 2023 09:24:34 -0700 Subject: [PATCH 087/101] Retriever model-type with precision_at_k built-in metric (#10079) Signed-off-by: Liang Zhang Signed-off-by: Bryan Qiu Co-authored-by: Bryan Qiu <55931436+bbqiu@users.noreply.github.com> --- docs/source/python_api/mlflow.metrics.rst | 2 + mlflow/metrics/__init__.py | 50 ++++ mlflow/metrics/metric_definitions.py | 44 ++++ mlflow/models/evaluation/base.py | 21 +- mlflow/models/evaluation/default_evaluator.py | 30 ++- tests/evaluate/test_default_evaluator.py | 216 ++++++++++++++++++ 6 files changed, 355 insertions(+), 8 deletions(-) diff --git a/docs/source/python_api/mlflow.metrics.rst b/docs/source/python_api/mlflow.metrics.rst index 7517e239f466b..7903cc343f49a 100644 --- a/docs/source/python_api/mlflow.metrics.rst +++ b/docs/source/python_api/mlflow.metrics.rst @@ -86,6 +86,8 @@ We provide the following builtin factory functions to create :py:class:`Evaluati .. autofunction:: mlflow.metrics.rougeLsum +.. autofunction:: mlflow.metrics.precision_at_k + .. autofunction:: mlflow.metrics.toxicity .. 
autofunction:: mlflow.metrics.token_count diff --git a/mlflow/metrics/__init__.py b/mlflow/metrics/__init__.py index 010f777fa8c8d..2c6f1d7770bff 100644 --- a/mlflow/metrics/__init__.py +++ b/mlflow/metrics/__init__.py @@ -20,6 +20,7 @@ _mape_eval_fn, _max_error_eval_fn, _mse_eval_fn, + _precision_at_k_eval_fn, _precision_eval_fn, _r2_score_eval_fn, _recall_eval_fn, @@ -240,6 +241,55 @@ def rougeLsum() -> EvaluationMetric: ) +@experimental +def precision_at_k(k) -> EvaluationMetric: + """ + This function will create a metric for calculating ``precision_at_k`` for retriever models. + + For retriever models, it's recommended to use a static dataset represented by a Pandas + Dataframe or an MLflow Pandas Dataset containing the input queries, retrieved relevant + document IDs, and the ground-truth relevant document IDs for the evaluation. A + "document ID" should be a string that identifies a document. For each row, the retrieved + relevant document IDs and the ground-truth relevant document IDs should be provided as + a tuple of document ID strings. The column name of the retrieved relevant document IDs + should be specified by the ``predictions`` parameter, and the column name of the + ground-truth relevant document IDs should be specified by the ``targets`` parameter. + Alternatively, you can use a function that returns a tuple of document ID strings for + the evaluation. The function should take a Pandas DataFrame as input and return a Pandas + DataFrame with the same number of rows, where each row contains a tuple of document ID + strings. The output column name of the function should be specified by the ``predictions`` + parameter. + + This metric computes a score between 0 and 1 for each row representing the precision of the + retriever model at the given k value. The score is calculated by dividing the number of relevant + documents retrieved by the total number of documents retrieved or k, whichever is smaller. + If no relevant documents are retrieved, the score is 1, indication that no false positives were + retrieved. + + The model output should be a pandas dataframe with a column containing a tuple of strings on + each row. The strings in the tuple represent the document IDs. + The label column should contain a tuple of strings representing the relevant + document IDs for each row, provided by the input ``data`` parameter. + The ``k`` parameter should be a positive integer representing the number of retrieved documents + to evaluate for each row. ``k`` defaults to 3. + + This metric is a default metric for the ``retriever`` model type. + When the model type is ``"retriever"``, this metric will be calculated automatically with the + default ``k`` value of 3. To use another ``k`` value, use the ``evaluator_config`` parameter + in the ``mlflow.evaluate()`` API as follows: ``evaluator_config={"k": }``. + Alternatively, you can directly specify the ``mlflow.metrics.precision_at_k()`` metric + in the ``extra_metrics`` parameter of the ``mlflow.evaluate()`` API without specifying a model + type. In this case, the ``k`` value specified in the ``evaluator_config`` parameter will be + ignored. 
+ """ + return make_metric( + eval_fn=_precision_at_k_eval_fn(k), + greater_is_better=True, + name="precision_at_k", + version="v1", + ) + + # General Regression Metrics def mae() -> EvaluationMetric: """ diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 079ebaba753f6..0cf86da775062 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -33,6 +33,28 @@ def _validate_text_data(data, metric_name, column_name): return True +def _validate_and_fix_text_tuple_data(data, metric_name, column_name): + """Validates that the data is a pandas Series of a tuple of strings and is non-empty""" + if data is None or len(data) == 0: + return False + + for index, value in data.items(): + if not isinstance(value, tuple) or not all(isinstance(val, str) for val in value): + if isinstance(value, str): + # Single entry tuples get unpacked. + # So if the entry is a string, put them back into a tuple. + data[index] = (value,) + else: + _logger.warning( + f"Cannot calculate metric '{metric_name}' for non-tuple[str] inputs. " + f"Row #{index} of column '{column_name}' has a non-tuple[str] value of:" + f"{value}. Skipping metric logging." + ) + return False + + return True + + def _token_count_eval_fn(predictions, targets=None, metrics=None): import tiktoken @@ -325,3 +347,25 @@ def _f1_score_eval_fn( sample_weight=sample_weight, ) return MetricValue(aggregate_results={"f1_score": f1}) + + +def _precision_at_k_eval_fn(k): + def _fn(predictions, targets): + if not _validate_and_fix_text_tuple_data( + predictions, "precision_at_k", "predictions" + ) or not _validate_and_fix_text_tuple_data(targets, "precision_at_k", "targets"): + return + + scores = [] + for i in range(len(predictions)): + # only include the top k retrieved chunks + ground_truth, retrieved = set(targets[i]), predictions[i][:k] + relevant_doc_count = sum(1 for doc in retrieved if doc in ground_truth) + if len(retrieved) > 0: + scores.append(relevant_doc_count / len(retrieved)) + else: + scores.append(1) + + return MetricValue(scores=scores, aggregate_results=standard_aggregations(scores)) + + return _fn diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index 7bb603ed039ed..fa67531c897b0 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -54,7 +54,7 @@ class _ModelType: QUESTION_ANSWERING = "question-answering" TEXT_SUMMARIZATION = "text-summarization" TEXT = "text" - # TODO: Add 'retrieval' model type + RETRIEVER = "retriever" def __init__(self): raise NotImplementedError("This class is not meant to be instantiated.") @@ -67,6 +67,7 @@ def values(cls): cls.QUESTION_ANSWERING, cls.TEXT_SUMMARIZATION, cls.TEXT, + cls.RETRIEVER, ) @@ -1159,7 +1160,7 @@ def predict(self, context, model_input: pd.DataFrame): def evaluate( - model: Optional[str] = None, + model=None, data=None, *, model_type: Optional[str] = None, @@ -1304,6 +1305,13 @@ def evaluate( .. _textstat: https://pypi.org/project/textstat + - For retriever models, the default evaluator logs: + - **metrics**: ``precision_at_k``: precision at k with the default value of k = 3. To use + a different value of k, specify the ``evaluator_config`` parameter to include ``"k"``: + ``evaluator_config={"k":5}``. + - **artifacts**: A JSON file containing the inputs, outputs, targets, and per-row metrics + of the model in tabular format. + - For sklearn models, the default evaluator additionally logs the model's evaluation criterion (e.g. 
mean accuracy for a classifier) computed by `model.score` method. @@ -1347,6 +1355,9 @@ def evaluate( metrics. - **col_mapping**: A dictionary mapping column names in the input dataset or output predictions to column names used when invoking the evaluation functions. + - **k**: The number of top retrieved documents to use when computing the built-in metric + precision_at_k for model type "retriever". Default value is 3. For other model types, + this parameter will be ignored. - Limitations of evaluation dataset: - For classification tasks, dataset labels are used to infer the total number of classes. @@ -1466,13 +1477,15 @@ def model(inputs): - ``'question-answering'`` - ``'text-summarization'`` - ``'text'`` + - ``'retriever'`` If no ``model_type`` is specified, then you must provide a a list of metrics to compute via the ``extra_metrics`` param. .. note:: - ``'question-answering'``, ``'text-summarization'``, and ``'text'`` - are experimental and may be changed or removed in a future release. + ``'question-answering'``, ``'text-summarization'``, ``'text'``, and + ``'retriever'`` are experimental and may be changed or removed in a + future release. :param dataset_path: (Optional) The path where the data is stored. Must not contain double quotes (``“``). If specified, the path is logged to the ``mlflow.datasets`` diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index cf168fb8152c1..a0d4f576424f3 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -9,6 +9,7 @@ import shutil import tempfile import time +import traceback import warnings from collections import namedtuple from functools import partial @@ -30,6 +31,7 @@ ari_grade_level, exact_match, flesch_kincaid_grade_level, + precision_at_k, rouge1, rouge2, rougeL, @@ -1154,6 +1156,8 @@ def _get_args_for_metrics(self, extra_metric, eval_df): params_not_found = [] # eval_fn has parameters (eval_df, builtin_metrics) for backwards compatibility if len(parameters) == 2: + param_0_name, param_1_name = parameters.keys() + if len(parameters) == 2 and param_0_name != "predictions" and param_1_name != "targets": eval_fn_args.append(eval_df_copy) eval_fn_args.append(copy.deepcopy(self.metrics)) # eval_fn can have parameters like (predictions, targets, metrics, random_col) @@ -1475,20 +1479,26 @@ def _test_first_row(self, eval_df): name = f"{metric.name}/{metric.version}" if metric.version else metric.name self.metrics_values.update({name: metric_value}) except Exception as e: + stacktrace_str = traceback.format_exc() if isinstance(e, MlflowException): - exceptions.append(f"Metric '{metric.name}': Error:\n{e.message}") + exceptions.append( + f"Metric '{metric.name}': Error:\n{e.message}\n{stacktrace_str}" + ) else: - exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}") + exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}\n{stacktrace_str}") self._update_metrics() for metric in self.extra_metrics: try: eval_fn_args = self._get_args_for_metrics(metric, first_row_df) metric.eval_fn(*eval_fn_args) except Exception as e: + stacktrace_str = traceback.format_exc() if isinstance(e, MlflowException): - exceptions.append(f"Metric '{metric.name}': Error:\n{e.message}") + exceptions.append( + f"Metric '{metric.name}': Error:\n{e.message}\n{stacktrace_str}" + ) else: - exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}") + exceptions.append(f"Metric '{metric.name}': Error:\n{e!r}\n{stacktrace_str}") if len(exceptions) > 0: raise 
MlflowException("\n".join(exceptions)) @@ -1661,6 +1671,18 @@ def _evaluate( ] elif self.model_type == _ModelType.TEXT: self.builtin_metrics = text_metrics + elif self.model_type == _ModelType.RETRIEVER: + if self.evaluator_config.get("k", None) is None: + self.evaluator_config["k"] = 3 # Setting the default k to 3 + k = self.evaluator_config.pop("k") + if not (isinstance(k, int) and k > 0): + _logger.warning( + "Cannot calculate 'precision_at_k' for invalid parameter 'k'." + f"'k' should be a positive integer; found: {k}" + "Skipping metric logging." + ) + else: + self.builtin_metrics = [precision_at_k(k)] self.y_pred = ( self.y_pred.squeeze() if isinstance(self.y_pred, pd.DataFrame) else self.y_pred diff --git a/tests/evaluate/test_default_evaluator.py b/tests/evaluate/test_default_evaluator.py index a961b8f435cc2..06c81a733d7e1 100644 --- a/tests/evaluate/test_default_evaluator.py +++ b/tests/evaluate/test_default_evaluator.py @@ -3197,6 +3197,222 @@ def test_evaluate_custom_metrics_string_values(): assert results.metrics["cm/eval_config_value_average"] == 3 +def validate_retriever_logged_data(logged_data): + columns = { + "question", + "outputs", # TODO: fix the logged data to name the model output column "retrieved_context" + # Right now, it's hard-coded "outputs", which is not ideal + "precision_at_k/v1/score", + "ground_truth", + } + + assert set(logged_data.columns.tolist()) == columns + + assert logged_data["question"].tolist() == ["q1?", "q1?", "q1?"] + assert logged_data["outputs"].tolist() == [["doc1", "doc3", "doc2"]] * 3 + assert (logged_data["precision_at_k/v1/score"] <= 1).all() + assert logged_data["ground_truth"].tolist() == [["doc1", "doc2"]] * 3 + + +def test_evaluate_retriever(): + X = pd.DataFrame({"question": ["q1?"] * 3, "ground_truth": [("doc1", "doc2")] * 3}) + + def fn(X): + return pd.DataFrame({"retrieved_context": [("doc1", "doc3", "doc2")] * len(X)}) + + with mlflow.start_run() as run: + results = mlflow.evaluate( + model=fn, + data=X, + targets="ground_truth", + model_type="retriever", + evaluators="default", + evaluator_config={ + "k": 3, + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 2 / 3, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 2 / 3, + } + client = mlflow.MlflowClient() + artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] + assert "eval_results_table.json" in artifacts + logged_data = pd.DataFrame(**results.artifacts["eval_results_table"].content) + validate_retriever_logged_data(logged_data) + assert set(results.metrics.keys()) == { + "precision_at_k/v1/p90", + "precision_at_k/v1/mean", + "precision_at_k/v1/variance", + } + assert results.metrics["precision_at_k/v1/p90"] == 2 / 3 + assert results.metrics["precision_at_k/v1/mean"] == 2 / 3 + assert results.metrics["precision_at_k/v1/variance"] == 0 + + # test with a big k to ensure we use min(k, len(retrieved_chunks)) + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn, + data=X, + targets="ground_truth", + model_type="retriever", + evaluators="default", + evaluator_config={ + "k": 6, + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 2 / 3, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 2 / 3, + } + + # test with default k + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn, + data=X, + targets="ground_truth", + model_type="retriever", + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { 
+ "precision_at_k/v1/mean": 2 / 3, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 2 / 3, + } + + # test with multiple chunks from same doc + def fn2(X): + return pd.DataFrame({"retrieved_context": [("doc1", "doc1", "doc3")] * len(X)}) + + X = pd.DataFrame({"question": ["q1?"] * 3, "ground_truth": [("doc1", "doc3")] * 3}) + + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn2, + data=X, + targets="ground_truth", + model_type="retriever", + evaluator_config={ + "default": { + "k": 3, + } + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 1, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 1, + } + + # test with empty retrieved doc + def fn3(X): + return pd.DataFrame({"output": [()] * len(X)}) + + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn3, + data=X, + targets="ground_truth", + model_type="retriever", + evaluator_config={ + "default": { + "k": 3, + } + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 1, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 1, + } + + # test with single retrieved doc + def fn4(X): + return pd.DataFrame({"output": [("doc1")] * len(X)}) + + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn4, + data=X, + targets="ground_truth", + model_type="retriever", + evaluator_config={ + "default": { + "k": 3, + } + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 1, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 1, + } + + # test with single ground truth doc + X_1 = pd.DataFrame({"question": ["q1?"] * 3, "ground_truth": [("doc1")] * 3}) + + with mlflow.start_run() as run: + mlflow.evaluate( + model=fn, + data=X_1, + targets="ground_truth", + model_type="retriever", + evaluator_config={ + "default": { + "k": 3, + } + }, + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 1 / 3, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 1 / 3, + } + + +def test_evaluate_precision_at_k_no_model_type(): + X = pd.DataFrame({"question": ["q1?"] * 3, "ground_truth": [("doc1", "doc2")] * 3}) + + def fn(X): + return pd.DataFrame({"retrieved_context": [("doc1", "doc3", "doc2")] * len(X)}) + + with mlflow.start_run() as run: + results = mlflow.evaluate( + model=fn, + data=X, + targets="ground_truth", + extra_metrics=[mlflow.metrics.precision_at_k(3)], + ) + run = mlflow.get_run(run.info.run_id) + assert run.data.metrics == { + "precision_at_k/v1/mean": 2 / 3, + "precision_at_k/v1/variance": 0, + "precision_at_k/v1/p90": 2 / 3, + } + client = mlflow.MlflowClient() + artifacts = [a.path for a in client.list_artifacts(run.info.run_id)] + assert "eval_results_table.json" in artifacts + logged_data = pd.DataFrame(**results.artifacts["eval_results_table"].content) + validate_retriever_logged_data(logged_data) + assert set(results.metrics.keys()) == { + "precision_at_k/v1/p90", + "precision_at_k/v1/mean", + "precision_at_k/v1/variance", + } + assert results.metrics["precision_at_k/v1/p90"] == 2 / 3 + assert results.metrics["precision_at_k/v1/mean"] == 2 / 3 + assert results.metrics["precision_at_k/v1/variance"] == 0 + + def test_evaluate_with_numpy_array(): data = [ ["What is MLflow?"], From 6e7e45af3ac14f536a0affe210536b3ce3cd6b49 Mon Sep 17 00:00:00 2001 From: Sunish Sheth Date: Wed, 25 Oct 2023 13:38:18 -0700 Subject: [PATCH 088/101] Fixing MetricValue justification type 
(#10131) Signed-off-by: Sunish Sheth --- mlflow/metrics/base.py | 2 +- mlflow/models/evaluation/default_evaluator.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mlflow/metrics/base.py b/mlflow/metrics/base.py index 538cd79e11f2f..0afd104aff6a7 100644 --- a/mlflow/metrics/base.py +++ b/mlflow/metrics/base.py @@ -16,7 +16,7 @@ class MetricValue: """ scores: List[float] = None - justifications: List[float] = None + justifications: List[str] = None aggregate_results: Dict[str, float] = None diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index a0d4f576424f3..a076915a90e9a 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1292,7 +1292,6 @@ def _generate_model_predictions(self, compute_latency=False): """ Helper method for generating model predictions """ - _logger.info("Computing model predictions.") def predict_with_latency(X_copy): y_pred_list = [] @@ -1333,6 +1332,8 @@ def predict_with_latency(X_copy): X_copy = self.X.copy_to_avoid_mutation() if self.model is not None: + _logger.info("Computing model predictions.") + if compute_latency: model_predictions = predict_with_latency(X_copy) else: From 0515ef748e2ec0f75970e5820004d4c083f5461a Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Wed, 25 Oct 2023 14:18:43 -0700 Subject: [PATCH 089/101] Revert "Unpin pyspark" (#10133) --- .github/workflows/master.yml | 5 ++++- .github/workflows/recipe.yml | 7 +++++-- mlflow/utils/_spark_utils.py | 2 +- tests/data/test_delta_dataset_source.py | 2 +- tests/data/test_pandas_dataset.py | 2 +- tests/data/test_spark_dataset.py | 2 +- tests/data/test_spark_dataset_source.py | 2 +- tests/recipes/test_ingest_step.py | 2 +- tests/recipes/test_predict_step.py | 2 +- 9 files changed, 16 insertions(+), 10 deletions(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 2dd5fe3269726..953ea826d32e4 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -109,6 +109,8 @@ jobs: python -m venv .venv source .venv/bin/activate source ./dev/install-common-deps.sh --ml + # pyspark 3.5 is incompatible with delta 2.4 + pip install 'pyspark<3.5' - uses: ./.github/actions/pipdeptree - name: Import check run: | @@ -351,7 +353,8 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e .[extras] - pip install pyspark + # pyspark 3.5 is incompatible with delta 2.4 + pip install 'pyspark<3.5' pip install mleap # Install Hugging Face datasets to test Hugging Face usage with MLflow dataset tracking pip install datasets diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index a3565e42e72f3..2ed6b04ead32a 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -42,7 +42,9 @@ jobs: - name: Install dependencies run: | source ./dev/install-common-deps.sh - pip install pyspark + pip install -e . + # pyspark 3.5 is incompatible with delta 2.4 + pip install 'pyspark<3.5' - name: Run tests run: | pytest tests/recipes @@ -68,7 +70,8 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e . - pip install pyspark + # pyspark 3.5 is incompatible with delta 2.4 + pip install 'pyspark<3.5' # TODO: Importing datasets in a pandas UDF (created by mlflow.pyfunc.spark_udf) crashes # the Python worker. To avoid this, uninstall `datasets`. 
This is a temporary workaround. pip uninstall -y datasets diff --git a/mlflow/utils/_spark_utils.py b/mlflow/utils/_spark_utils.py index 80a2b417deedc..239ce113f3763 100644 --- a/mlflow/utils/_spark_utils.py +++ b/mlflow/utils/_spark_utils.py @@ -46,7 +46,7 @@ def _create_local_spark_session_for_recipes(): _prepare_subprocess_environ_for_creating_local_spark_session() return ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_delta_dataset_source.py b/tests/data/test_delta_dataset_source.py index b199e558b1d74..e9d8e4f9f766e 100644 --- a/tests/data/test_delta_dataset_source.py +++ b/tests/data/test_delta_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_pandas_dataset.py b/tests/data/test_pandas_dataset.py index c8244eabbb2a7..2a452593e3e26 100644 --- a/tests/data/test_pandas_dataset.py +++ b/tests/data/test_pandas_dataset.py @@ -24,7 +24,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset.py b/tests/data/test_spark_dataset.py index 2283f4bf30ca9..ae2fc350587b4 100644 --- a/tests/data/test_spark_dataset.py +++ b/tests/data/test_spark_dataset.py @@ -21,7 +21,7 @@ def spark_session(tmp_path): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset_source.py b/tests/data/test_spark_dataset_source.py index 78c0e0ccfee9b..7b68370f2696f 100644 --- a/tests/data/test_spark_dataset_source.py +++ b/tests/data/test_spark_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_ingest_step.py b/tests/recipes/test_ingest_step.py index 4da8f2f9f5f4d..a5799d8f74131 100644 --- a/tests/recipes/test_ingest_step.py +++ b/tests/recipes/test_ingest_step.py @@ -35,7 +35,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", 
"io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_predict_step.py b/tests/recipes/test_predict_step.py index 987bfabfc5298..60bb8d3846879 100644 --- a/tests/recipes/test_predict_step.py +++ b/tests/recipes/test_predict_step.py @@ -28,7 +28,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") + .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" From e562e4416b0b6406dc9d5755ccd08470ded322b3 Mon Sep 17 00:00:00 2001 From: Corey Zumar <39497902+dbczumar@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:25:56 -0700 Subject: [PATCH 090/101] LLM Eval: Support passing a single grading context column as a string (#10136) Signed-off-by: dbczumar --- mlflow/metrics/genai/genai_metric.py | 16 +++--- tests/metrics/genai/test_genai_metrics.py | 65 +++++++++++++++++++++++ 2 files changed, 74 insertions(+), 7 deletions(-) diff --git a/mlflow/metrics/genai/genai_metric.py b/mlflow/metrics/genai/genai_metric.py index da635f14d2f14..879d4addaefa7 100644 --- a/mlflow/metrics/genai/genai_metric.py +++ b/mlflow/metrics/genai/genai_metric.py @@ -3,7 +3,7 @@ import re from concurrent.futures import ThreadPoolExecutor, as_completed from inspect import Parameter, Signature -from typing import TYPE_CHECKING, Any, Dict, List, Optional +from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union from mlflow.exceptions import MlflowException from mlflow.metrics.base import EvaluationExample, MetricValue @@ -90,7 +90,7 @@ def make_genai_metric( examples: Optional[List[EvaluationExample]] = None, version: Optional[str] = _get_latest_metric_version(), model: Optional[str] = _get_default_model(), - grading_context_columns: Optional[List[str]] = [], # noqa: B006 + grading_context_columns: Optional[Union[str, List[str]]] = [], # noqa: B006 parameters: Optional[Dict[str, Any]] = None, aggregations: Optional[List[str]] = ["mean", "variance", "p90"], # noqa: B006 greater_is_better: bool = True, @@ -109,10 +109,11 @@ def make_genai_metric( "openai:/gpt-4" or "gateway:/my-route". Defaults to "openai:/gpt-4". Your use of a third party LLM service (e.g., OpenAI) for evaluation may be subject to and governed by the LLM service's terms of use. - :param grading_context_columns: (Optional) grading_context_columns required to compute - the metric. These grading_context_columns are used by the LLM as a judge as additional - information to compute the metric. The columns are extracted from the input dataset or - output predictions based on col_mapping in evaluator_config. + :param grading_context_columns: (Optional) The name of the grading context column, or a list of + grading context column names, required to compute the metric. The + ``grading_context_columns`` are used by the LLM as a judge as additional information to + compute the metric. The columns are extracted from the input dataset or output predictions + based on ``col_mapping`` in the ``evaluator_config`` passed to :py:func:`mlflow.evaluate()`. 
:param parameters: (Optional) Parameters for the LLM used to compute the metric. By default, we set the temperature to 0.0, max_tokens to 200, and top_p to 1.0. We recommend setting the temperature to 0.0 for the LLM used as a judge to ensure consistent results. @@ -187,6 +188,8 @@ def make_genai_metric( greater_is_better=True, ) """ + if not isinstance(grading_context_columns, list): + grading_context_columns = [grading_context_columns] class_name = f"mlflow.metrics.genai.prompts.{version}.EvaluationModel" try: @@ -221,7 +224,6 @@ def eval_fn( """ This is the function that is called when the metric is evaluated. """ - eval_values = dict(zip(grading_context_columns, args)) outputs = predictions.to_list() diff --git a/tests/metrics/genai/test_genai_metrics.py b/tests/metrics/genai/test_genai_metrics.py index 88153dd778f47..882a7f2131999 100644 --- a/tests/metrics/genai/test_genai_metrics.py +++ b/tests/metrics/genai/test_genai_metrics.py @@ -270,6 +270,71 @@ def test_make_genai_metric_correct_response(): assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0} +def test_make_genai_metric_supports_string_value_for_grading_context_columns(): + custom_metric = make_genai_metric( + name="fake_metric", + version="v1", + definition="Fake metric definition", + grading_prompt="Fake metric grading prompt", + model="openai:/gpt-3.5-turbo", + grading_context_columns="targets", + greater_is_better=True, + examples=[ + EvaluationExample( + input="example-input", + output="example-output", + score=4, + justification="example-justification", + grading_context={"targets": "example-ground_truth"}, + ) + ], + ) + + assert [ + param.name for param in inspect.signature(custom_metric.eval_fn).parameters.values() + ] == ["predictions", "metrics", "inputs", "targets"] + + with mock.patch.object( + model_utils, + "score_model_on_payload", + return_value=properly_formatted_openai_response1, + ) as mock_predict_function: + metric_value = custom_metric.eval_fn( + pd.Series(["prediction"]), + {}, + pd.Series(["input"]), + pd.Series(["ground_truth"]), + ) + assert mock_predict_function.call_count == 1 + assert mock_predict_function.call_args[0][0] == "openai:/gpt-3.5-turbo" + assert mock_predict_function.call_args[0][1] == { + "prompt": "\nTask:\nYou are an impartial judge. You will be given an input that was " + "sent to a machine\nlearning model, and you will be given an output that the model " + "produced. You\nmay also be given additional information that was used by the model " + "to generate the output.\n\nYour task is to determine a numerical score called " + "fake_metric based on the input and output.\nA definition of " + "fake_metric and a grading rubric are provided below.\nYou must use the " + "grading rubric to determine your score. You must also justify your score." + "\n\nExamples could be included below for reference. 
Make sure to use them as " + "references and to\nunderstand them before completing the task.\n" + "\nInput:\ninput\n\nOutput:\nprediction\n\nAdditional information used by the model:\n" + "key: targets\nvalue:\nground_truth\n\nMetric definition:\nFake metric definition\n\n" + "Grading rubric:\nFake metric grading prompt\n\nExamples:\n\nInput:\nexample-input\n\n" + "Output:\nexample-output\n\nAdditional information used by the model:\nkey: targets\n" + "value:\nexample-ground_truth\n\nscore: 4\njustification: " + "example-justification\n \n\nYou must return the following fields in your " + "response one below the other:\nscore: Your numerical score for the model's " + "fake_metric based on the rubric\njustification: Your step-by-step reasoning about " + "the model's fake_metric score\n ", + "temperature": 0.0, + "max_tokens": 200, + "top_p": 1.0, + } + assert metric_value.scores == [3] + assert metric_value.justifications == [openai_justification1] + assert metric_value.aggregate_results == {"mean": 3.0, "p90": 3.0, "variance": 0.0} + + def test_make_genai_metric_incorrect_response(): custom_metric = make_genai_metric( name="correctness", From bf76f43321018ec30fe241c35a9a898d121f3b60 Mon Sep 17 00:00:00 2001 From: Bryan Qiu <55931436+bbqiu@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:33:24 -0700 Subject: [PATCH 091/101] Refactor metrics_definitions.py (#10059) Signed-off-by: Bryan Qiu --- mlflow/metrics/metric_definitions.py | 183 +++++++++++++-------------- 1 file changed, 88 insertions(+), 95 deletions(-) diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index 0cf86da775062..facdc8d1cb079 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -8,6 +8,11 @@ _logger = logging.getLogger(__name__) +targets_col_specifier = "the column specified by the `targets` parameter" +predictions_col_specifier = ( + "the column specified by the `predictions` parameter or the model output column" +) + def standard_aggregations(scores): return { @@ -17,16 +22,16 @@ def standard_aggregations(scores): } -def _validate_text_data(data, metric_name, column_name): - """Validates that the data is text and is non-empty""" - if len(data) == 0: +def _validate_text_data(data, metric_name, col_specifier): + """Validates that the data is a list of strs and is non-empty""" + if data is None or len(data) == 0: return False for row, line in enumerate(data): if not isinstance(line, str): _logger.warning( f"Cannot calculate {metric_name} for non-string inputs. " - + f"Non-string found for {column_name} on row {row}. skipping metric logging." + f"Non-string found for {col_specifier} on row {row}. Skipping metric logging." 
) return False @@ -82,7 +87,7 @@ def _cached_evaluate_load(path, module_type=None): def _toxicity_eval_fn(predictions, targets=None, metrics=None): - if not _validate_text_data(predictions, "toxicity", "predictions"): + if not _validate_text_data(predictions, "toxicity", predictions_col_specifier): return try: toxicity = _cached_evaluate_load("toxicity", module_type="measurement") @@ -106,7 +111,7 @@ def _toxicity_eval_fn(predictions, targets=None, metrics=None): def _flesch_kincaid_eval_fn(predictions, targets=None, metrics=None): - if not _validate_text_data(predictions, "flesch_kincaid", "predictions"): + if not _validate_text_data(predictions, "flesch_kincaid", predictions_col_specifier): return try: @@ -123,7 +128,7 @@ def _flesch_kincaid_eval_fn(predictions, targets=None, metrics=None): def _ari_eval_fn(predictions, targets=None, metrics=None): - if not _validate_text_data(predictions, "ari", "predictions"): + if not _validate_text_data(predictions, "ari", predictions_col_specifier): return try: @@ -150,111 +155,99 @@ def _accuracy_eval_fn(predictions, targets=None, metrics=None, sample_weight=Non def _rouge1_eval_fn(predictions, targets=None, metrics=None): - if targets is not None and len(targets) != 0: - if not _validate_text_data(targets, "rouge1", "targets") or not _validate_text_data( - predictions, "rouge1", "predictions" - ): - return + if not _validate_text_data(targets, "rouge1", targets_col_specifier) or not _validate_text_data( + predictions, "rouge1", predictions_col_specifier + ): + return - try: - rouge = _cached_evaluate_load("rouge") - except Exception as e: - _logger.warning( - f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." - ) - return + try: + rouge = _cached_evaluate_load("rouge") + except Exception as e: + _logger.warning(f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging.") + return - scores = rouge.compute( - predictions=predictions, - references=targets, - rouge_types=["rouge1"], - use_aggregator=False, - )["rouge1"] - return MetricValue( - scores=scores, - aggregate_results=standard_aggregations(scores), - ) + scores = rouge.compute( + predictions=predictions, + references=targets, + rouge_types=["rouge1"], + use_aggregator=False, + )["rouge1"] + return MetricValue( + scores=scores, + aggregate_results=standard_aggregations(scores), + ) def _rouge2_eval_fn(predictions, targets=None, metrics=None): - if targets is not None and len(targets) != 0: - if not _validate_text_data(targets, "rouge2", "targets") or not _validate_text_data( - predictions, "rouge2", "predictions" - ): - return + if not _validate_text_data(targets, "rouge2", targets_col_specifier) or not _validate_text_data( + predictions, "rouge2", predictions_col_specifier + ): + return - try: - rouge = _cached_evaluate_load("rouge") - except Exception as e: - _logger.warning( - f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." 
- ) - return + try: + rouge = _cached_evaluate_load("rouge") + except Exception as e: + _logger.warning(f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging.") + return - scores = rouge.compute( - predictions=predictions, - references=targets, - rouge_types=["rouge2"], - use_aggregator=False, - )["rouge2"] - return MetricValue( - scores=scores, - aggregate_results=standard_aggregations(scores), - ) + scores = rouge.compute( + predictions=predictions, + references=targets, + rouge_types=["rouge2"], + use_aggregator=False, + )["rouge2"] + return MetricValue( + scores=scores, + aggregate_results=standard_aggregations(scores), + ) def _rougeL_eval_fn(predictions, targets=None, metrics=None): - if targets is not None and len(targets) != 0: - if not _validate_text_data(targets, "rougeL", "targets") or not _validate_text_data( - predictions, "rougeL", "predictions" - ): - return + if not _validate_text_data(targets, "rougeL", targets_col_specifier) or not _validate_text_data( + predictions, "rougeL", predictions_col_specifier + ): + return - try: - rouge = _cached_evaluate_load("rouge") - except Exception as e: - _logger.warning( - f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." - ) - return + try: + rouge = _cached_evaluate_load("rouge") + except Exception as e: + _logger.warning(f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging.") + return - scores = rouge.compute( - predictions=predictions, - references=targets, - rouge_types=["rougeL"], - use_aggregator=False, - )["rougeL"] - return MetricValue( - scores=scores, - aggregate_results=standard_aggregations(scores), - ) + scores = rouge.compute( + predictions=predictions, + references=targets, + rouge_types=["rougeL"], + use_aggregator=False, + )["rougeL"] + return MetricValue( + scores=scores, + aggregate_results=standard_aggregations(scores), + ) def _rougeLsum_eval_fn(predictions, targets=None, metrics=None): - if targets is not None and len(targets) != 0: - if not _validate_text_data(targets, "rougeLsum", "targets") or not _validate_text_data( - predictions, "rougeLsum", "predictions" - ): - return + if not _validate_text_data( + targets, "rougeLsum", targets_col_specifier + ) or not _validate_text_data(predictions, "rougeLsum", predictions_col_specifier): + return - try: - rouge = _cached_evaluate_load("rouge") - except Exception as e: - _logger.warning( - f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging." 
- ) - return + try: + rouge = _cached_evaluate_load("rouge") + except Exception as e: + _logger.warning(f"Failed to load 'rouge' metric (error: {e!r}), skipping metric logging.") + return - scores = rouge.compute( - predictions=predictions, - references=targets, - rouge_types=["rougeLsum"], - use_aggregator=False, - )["rougeLsum"] - return MetricValue( - scores=scores, - aggregate_results=standard_aggregations(scores), - ) + scores = rouge.compute( + predictions=predictions, + references=targets, + rouge_types=["rougeLsum"], + use_aggregator=False, + )["rougeLsum"] + return MetricValue( + scores=scores, + aggregate_results=standard_aggregations(scores), + ) def _mae_eval_fn(predictions, targets=None, metrics=None, sample_weight=None): From e2d3511a1e9ebcb13ecd9288e37976f6d0b8a91c Mon Sep 17 00:00:00 2001 From: Bryan Qiu <55931436+bbqiu@users.noreply.github.com> Date: Wed, 25 Oct 2023 15:43:51 -0700 Subject: [PATCH 092/101] improving retriever mvp docs (#10129) Signed-off-by: Bryan Qiu --- mlflow/metrics/__init__.py | 62 +++++++++---------- mlflow/metrics/metric_definitions.py | 4 +- mlflow/models/evaluation/base.py | 10 +-- mlflow/models/evaluation/default_evaluator.py | 4 +- 4 files changed, 38 insertions(+), 42 deletions(-) diff --git a/mlflow/metrics/__init__.py b/mlflow/metrics/__init__.py index 2c6f1d7770bff..6791b02650040 100644 --- a/mlflow/metrics/__init__.py +++ b/mlflow/metrics/__init__.py @@ -246,41 +246,39 @@ def precision_at_k(k) -> EvaluationMetric: """ This function will create a metric for calculating ``precision_at_k`` for retriever models. - For retriever models, it's recommended to use a static dataset represented by a Pandas - Dataframe or an MLflow Pandas Dataset containing the input queries, retrieved relevant - document IDs, and the ground-truth relevant document IDs for the evaluation. A - "document ID" should be a string that identifies a document. For each row, the retrieved - relevant document IDs and the ground-truth relevant document IDs should be provided as - a tuple of document ID strings. The column name of the retrieved relevant document IDs - should be specified by the ``predictions`` parameter, and the column name of the - ground-truth relevant document IDs should be specified by the ``targets`` parameter. - Alternatively, you can use a function that returns a tuple of document ID strings for - the evaluation. The function should take a Pandas DataFrame as input and return a Pandas - DataFrame with the same number of rows, where each row contains a tuple of document ID - strings. The output column name of the function should be specified by the ``predictions`` + It is recommended to use a static dataset (Pandas Dataframe or MLflow Pandas Dataset) + containing columns for: input queries, retrieved relevant doc IDs, and ground-truth doc IDs. A + "doc ID" is a string that uniquely identifies a document. All doc IDs should be entered as a + tuple of doc ID strings. + + The ``targets`` parameter should specify the column name of the ground-truth relevant doc IDs. + + If you choose to use a static dataset, the ``predictions`` parameter should specify the column + name of the retrieved relevant doc IDs. Alternatively, if you choose to specify a function for + the ``model`` parameter, the function should take a Pandas DataFrame as input and return a + Pandas DataFrame with a column of retrieved relevant doc IDs, specified by the ``predictions`` parameter. 
+ ``k`` should be a positive integer specifying the number of retrieved doc IDs to consider for + each input query. ``k`` defaults to 3. + This metric computes a score between 0 and 1 for each row representing the precision of the - retriever model at the given k value. The score is calculated by dividing the number of relevant - documents retrieved by the total number of documents retrieved or k, whichever is smaller. - If no relevant documents are retrieved, the score is 1, indication that no false positives were - retrieved. - - The model output should be a pandas dataframe with a column containing a tuple of strings on - each row. The strings in the tuple represent the document IDs. - The label column should contain a tuple of strings representing the relevant - document IDs for each row, provided by the input ``data`` parameter. - The ``k`` parameter should be a positive integer representing the number of retrieved documents - to evaluate for each row. ``k`` defaults to 3. - - This metric is a default metric for the ``retriever`` model type. - When the model type is ``"retriever"``, this metric will be calculated automatically with the - default ``k`` value of 3. To use another ``k`` value, use the ``evaluator_config`` parameter - in the ``mlflow.evaluate()`` API as follows: ``evaluator_config={"k": }``. - Alternatively, you can directly specify the ``mlflow.metrics.precision_at_k()`` metric - in the ``extra_metrics`` parameter of the ``mlflow.evaluate()`` API without specifying a model - type. In this case, the ``k`` value specified in the ``evaluator_config`` parameter will be - ignored. + retriever model at the given ``k`` value. If no relevant documents are retrieved, the score is + 0, indicating that no relevant docs were retrieved. Let ``x = min(k, # of retrieved doc IDs)``. + Then, the precision at k is calculated as follows: + + ``precision_at_k`` = (# of relevant retrieved doc IDs in top-``x`` ranked docs) / ``x``. + + This metric is a builtin metric for the ``'retriever'`` model type, meaning it will be + automatically calculated with a default ``k`` value of 3. To use another ``k`` value, you have + two options with the :py:func:`mlflow.evaluate` API: + + 1. ``evaluator_config={"k": 5}`` + 2. ``extra_metrics = [mlflow.metrics.precision_at_k(k=5)]`` + + Note that the ``k`` value in the ``evaluator_config`` will be ignored in this case. It is + recommended to remove the ``model_type`` as well, or else precision@3 and precision@5 will + both be calculated. """ return make_metric( eval_fn=_precision_at_k_eval_fn(k), diff --git a/mlflow/metrics/metric_definitions.py b/mlflow/metrics/metric_definitions.py index facdc8d1cb079..c547e973e93dd 100644 --- a/mlflow/metrics/metric_definitions.py +++ b/mlflow/metrics/metric_definitions.py @@ -45,9 +45,9 @@ def _validate_and_fix_text_tuple_data(data, metric_name, column_name): for index, value in data.items(): if not isinstance(value, tuple) or not all(isinstance(val, str) for val in value): + # Single entry tuples are automatically unpacked by Pandas. + # So if the entry is a string, put it back into a tuple. if isinstance(value, str): - # Single entry tuples get unpacked. - # So if the entry is a string, put them back into a tuple. 
data[index] = (value,) else: _logger.warning( diff --git a/mlflow/models/evaluation/base.py b/mlflow/models/evaluation/base.py index fa67531c897b0..9b49fd5183ca3 100644 --- a/mlflow/models/evaluation/base.py +++ b/mlflow/models/evaluation/base.py @@ -1306,8 +1306,8 @@ def evaluate( https://pypi.org/project/textstat - For retriever models, the default evaluator logs: - - **metrics**: ``precision_at_k``: precision at k with the default value of k = 3. To use - a different value of k, specify the ``evaluator_config`` parameter to include ``"k"``: + - **metrics**: ``precision_at_k``: has a default value of k = 3. To use a different + value for k, include ``"k"`` in the ``evaluator_config`` parameter: ``evaluator_config={"k":5}``. - **artifacts**: A JSON file containing the inputs, outputs, targets, and per-row metrics of the model in tabular format. @@ -1355,9 +1355,9 @@ def evaluate( metrics. - **col_mapping**: A dictionary mapping column names in the input dataset or output predictions to column names used when invoking the evaluation functions. - - **k**: The number of top retrieved documents to use when computing the built-in metric - precision_at_k for model type "retriever". Default value is 3. For other model types, - this parameter will be ignored. + - **k**: The number of top-ranked retrieved documents to use when computing the built-in + metric ``precision_at_k`` for model_type="retriever". Default value is 3. For all other + model types, this parameter will be ignored. - Limitations of evaluation dataset: - For classification tasks, dataset labels are used to infer the total number of classes. diff --git a/mlflow/models/evaluation/default_evaluator.py b/mlflow/models/evaluation/default_evaluator.py index a076915a90e9a..a972dff8fb1b5 100644 --- a/mlflow/models/evaluation/default_evaluator.py +++ b/mlflow/models/evaluation/default_evaluator.py @@ -1673,9 +1673,7 @@ def _evaluate( elif self.model_type == _ModelType.TEXT: self.builtin_metrics = text_metrics elif self.model_type == _ModelType.RETRIEVER: - if self.evaluator_config.get("k", None) is None: - self.evaluator_config["k"] = 3 # Setting the default k to 3 - k = self.evaluator_config.pop("k") + k = self.evaluator_config.pop("k", 3) # default k to 3 if not specified if not (isinstance(k, int) and k > 0): _logger.warning( "Cannot calculate 'precision_at_k' for invalid parameter 'k'." From 866245f22f5016ca32d38d35f625ad9a358f6017 Mon Sep 17 00:00:00 2001 From: mberk06 Date: Wed, 25 Oct 2023 19:40:20 -0600 Subject: [PATCH 093/101] Michaelberk/add style to contributing guide (#9907) Signed-off-by: mberk06 Signed-off-by: Michael Berk Co-authored-by: Michael Berk --- CONTRIBUTING.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c4e562cb33771..ba366f2648353 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -11,6 +11,7 @@ We welcome community contributions to MLflow. 
This page provides useful informat - [Write designs for significant changes](#write-designs-for-significant-changes) - [Make changes backwards compatible](#make-changes-backwards-compatible) - [Consider introducing new features as MLflow Plugins](#consider-introducing-new-features-as-mlflow-plugins) + - [Python Style Guide](#python-style-guide) - [Setting up the repository](#setting-up-the-repository) - [Developing and testing MLflow](#developing-and-testing-mlflow) - [Environment Setup and Python configuration](#environment-setup-and-python-configuration) @@ -180,6 +181,35 @@ base](https://github.com/mlflow/mlflow/blob/cdc6a651d5af0f29bd448d2c87a198cf5d32 For more information about Plugins, see . +### Python Style Guide + +##### Docstrings + +We follow [Google's Python Style Guide](https://google.github.io/styleguide/pyguide.html) +for writing docstrings. Make sure your docstrings adhere to this style +guide. + +The process for converting to a standard docstring format style is +ongoing. If you see a docstring in the code base that doesn't adhere +to this formatting style and you'd like to contribute a fix, feel free +to open a PR to correct the docstring formatting. + +###### Code Style + +We use [pylint](https://pypi.org/project/pylint/), +[black](https://black.readthedocs.io/en/stable/the_black_code_style/index.html), +and [ruff](https://github.com/astral-sh/ruff) in our CI via +pre-commit Git hooks. If your code passes the CI checks, it's +formatted correctly. + +To validate that your local versions of the above libraries +match those in the mlflow CI, refer to [lint-requirements.txt](https://github.com/mlflow/mlflow/blob/master/requirements/lint-requirements.txt). +You can compare these versions with your local using pip: + +```bash +pip show pylint +``` + ## Setting up the repository To set up the MLflow repository, run the following commands: From 99b6a803828bfc76af7963a3b8fd48119f939c6d Mon Sep 17 00:00:00 2001 From: Chen Qian Date: Wed, 25 Oct 2023 18:56:22 -0700 Subject: [PATCH 094/101] Change the way of validating databricks auth in `mlflow.login()` (#10139) Signed-off-by: chenmoneygithub --- mlflow/utils/credentials.py | 5 +++-- tests/utils/test_credentials.py | 9 +++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/mlflow/utils/credentials.py b/mlflow/utils/credentials.py index 804f2740172a0..ff34c5db7e623 100644 --- a/mlflow/utils/credentials.py +++ b/mlflow/utils/credentials.py @@ -89,13 +89,14 @@ def _check_databricks_auth(): try: w = WorkspaceClient() # If credentials are invalid, `clusters.list()` will throw an error. - w.current_user.me() + w.clusters.list() _logger.info( "Succesfully signed in Databricks! Please run `mlflow.set_tracking_uri('databricks')` " "to connect MLflow to Databricks tracking server." ) return True - except Exception: + except Exception as e: + _logger.error(f"Failed to sign in Databricks: {e}") return False diff --git a/tests/utils/test_credentials.py b/tests/utils/test_credentials.py index f3701583cb1a6..2e05dd5fd8a65 100644 --- a/tests/utils/test_credentials.py +++ b/tests/utils/test_credentials.py @@ -104,6 +104,7 @@ def test_read_mlflow_creds_env_takes_precedence_over_file(tmp_path, monkeypatch) def test_mlflow_login(tmp_path, monkeypatch): + # Mock `input()` and `getpass()` to return host, username and password in order. 
with patch( "builtins.input", side_effect=["https://community.cloud.databricks.com/", "dummyusername"] ), patch("getpass.getpass", side_effect=["dummypassword"]): @@ -113,12 +114,12 @@ def test_mlflow_login(tmp_path, monkeypatch): monkeypatch.setenv("DATABRICKS_CONFIG_PROFILE", profile) class FakeWorkspaceClient: - class FakeUser: - def me(self): - return ["dummyusername"] + class FakeClusters: + def list(self): + return ["dummy_cluster"] def __init__(self): - self.current_user = FakeWorkspaceClient.FakeUser() + self.clusters = FakeWorkspaceClient.FakeClusters() with patch( "databricks.sdk.WorkspaceClient", From 87f1abb564c01b8752bffb76fb15e48417c5f8f0 Mon Sep 17 00:00:00 2001 From: Ben Wilson <39283302+BenWilson2@users.noreply.github.com> Date: Wed, 25 Oct 2023 22:24:02 -0400 Subject: [PATCH 095/101] Disallow heterogeneous memory configuration when saving transformers model (#10087) Signed-off-by: Ben Wilson --- mlflow/transformers/__init__.py | 26 +++++++ .../test_transformers_model_export.py | 67 +++++++++++++++++++ 2 files changed, 93 insertions(+) diff --git a/mlflow/transformers/__init__.py b/mlflow/transformers/__init__.py index 015196a54d2f5..31f567cc75317 100644 --- a/mlflow/transformers/__init__.py +++ b/mlflow/transformers/__init__.py @@ -426,6 +426,20 @@ def save_model( else: built_pipeline = transformers_model + # Verify that the model has not been loaded to distributed memory + # NB: transformers does not correctly save a model whose weights have been loaded + # using accelerate iff the model weights have been loaded using a device_map that is + # heterogeneous. There is a distinct possibility for a partial write to occur, causing an + # invalid state of the model's weights in this scenario. Hence, we raise. + if _is_model_distributed_in_memory(built_pipeline.model): + raise MlflowException( + "The model that is attempting to be saved has been loaded into memory " + "with an incompatible configuration. If you are using the accelerate " + "library to load your model, please ensure that it is saved only after " + "loading with the default device mapping. Do not specify `device_map` " + "and please try again." + ) + if mlflow_model is None: mlflow_model = Model() if signature is not None: @@ -852,6 +866,18 @@ def load_model( return _load_model(local_model_path, flavor_config, return_type, device, **kwargs) +def _is_model_distributed_in_memory(transformers_model): + """Check if the model is distributed across multiple devices in memory.""" + + # Check if the model attribute exists. If not, accelerate was not used and the model can + # be safely saved + if not hasattr(transformers_model, "hf_device_map"): + return False + # If the device map has more than one unique value entry, then the weights are not within + # a contiguous memory system (VRAM, SYS, or DISK) and thus cannot be safely saved. 
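+    # e.g. an accelerate device map such as {"encoder": "cpu", "decoder": "disk"} contains two
+    # distinct placements, meaning the weights are split across memory systems, so we return True.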
+ return len(set(transformers_model.hf_device_map.values())) > 1 + + # This function attempts to determine if a GPU is available for the PyTorch and TensorFlow libraries def is_gpu_available(): # try pytorch and if it fails, try tf diff --git a/tests/transformers/test_transformers_model_export.py b/tests/transformers/test_transformers_model_export.py index 6d2df7369f326..e3284bede05cf 100644 --- a/tests/transformers/test_transformers_model_export.py +++ b/tests/transformers/test_transformers_model_export.py @@ -46,6 +46,7 @@ _get_instance_type, _get_or_infer_task_type, _infer_transformers_task_type, + _is_model_distributed_in_memory, _record_pipeline_components, _should_add_pyfunc_to_model, _TransformersModel, @@ -3798,3 +3799,69 @@ def predict(self, context, model_input, params=None): python_model=TestModel(), artifacts={"some-model": "hf:/invalid-repo-id"}, ) + + +def test_model_distributed_across_devices(): + mock_model = mock.Mock() + mock_model.device.type = "meta" + mock_model.hf_device_map = { + "layer1": mock.Mock(type="cpu"), + "layer2": mock.Mock(type="cpu"), + "layer3": mock.Mock(type="gpu"), + "layer4": mock.Mock(type="disk"), + } + + assert _is_model_distributed_in_memory(mock_model) + + +def test_model_on_single_device(): + mock_model = mock.Mock() + mock_model.device.type = "cpu" + mock_model.hf_device_map = {} + + assert not _is_model_distributed_in_memory(mock_model) + + +def test_basic_model_with_accelerate_device_mapping_fails_save(tmp_path): + task = "translation_en_to_de" + architecture = "t5-small" + model = transformers.T5ForConditionalGeneration.from_pretrained( + pretrained_model_name_or_path=architecture, + device_map={"shared": "cpu", "encoder": "cpu", "decoder": "disk", "lm_head": "disk"}, + offload_folder=str(tmp_path / "weights"), + low_cpu_mem_usage=True, + ) + + tokenizer = transformers.T5TokenizerFast.from_pretrained( + pretrained_model_name_or_path=architecture, model_max_length=100 + ) + pipeline = transformers.pipeline(task=task, model=model, tokenizer=tokenizer) + + with pytest.raises( + MlflowException, + match="The model that is attempting to be saved has been loaded into memory", + ): + mlflow.transformers.save_model(transformers_model=pipeline, path=str(tmp_path / "model")) + + +def test_basic_model_with_accelerate_homogeneous_mapping_works(tmp_path): + task = "translation_en_to_de" + architecture = "t5-small" + model = transformers.T5ForConditionalGeneration.from_pretrained( + pretrained_model_name_or_path=architecture, + device_map={"shared": "cpu", "encoder": "cpu", "decoder": "cpu", "lm_head": "cpu"}, + low_cpu_mem_usage=True, + ) + + tokenizer = transformers.T5TokenizerFast.from_pretrained( + pretrained_model_name_or_path=architecture, model_max_length=100 + ) + pipeline = transformers.pipeline(task=task, model=model, tokenizer=tokenizer) + + mlflow.transformers.save_model(transformers_model=pipeline, path=str(tmp_path / "model")) + + loaded = mlflow.transformers.load_model(str(tmp_path / "model")) + + text = "Apples are delicious" + + assert loaded(text) == pipeline(text) From 19fa5c3acd03715b9d5f60222d58f54ddfd6bb33 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Thu, 26 Oct 2023 11:32:05 +0900 Subject: [PATCH 096/101] Revert "Revert "Unpin pyspark"" (#10148) Signed-off-by: harupy <17039389+harupy@users.noreply.github.com> Signed-off-by: B-Step62 Co-authored-by: Yuki Watanabe <31463517+B-Step62@users.noreply.github.com> --- .github/actions/py-cache-key/action.yml | 2 +- .github/workflows/master.yml | 5 +---- 
.github/workflows/recipe.yml | 7 ++----- mlflow/utils/_spark_utils.py | 2 +- tests/data/test_delta_dataset_source.py | 2 +- tests/data/test_pandas_dataset.py | 2 +- tests/data/test_spark_dataset.py | 2 +- tests/data/test_spark_dataset_source.py | 2 +- tests/recipes/test_ingest_step.py | 2 +- tests/recipes/test_predict_step.py | 2 +- 10 files changed, 11 insertions(+), 17 deletions(-) diff --git a/.github/actions/py-cache-key/action.yml b/.github/actions/py-cache-key/action.yml index 45dda7405d01b..659731dd3564f 100644 --- a/.github/actions/py-cache-key/action.yml +++ b/.github/actions/py-cache-key/action.yml @@ -22,5 +22,5 @@ runs: # Refresh cache daily DATE=$(date -u "+%Y%m%d") # Change this value to force a cache refresh - N=0 + N=1 echo "value=$RUNNER_IMAGE-$PYTHON_VERSION-$DATE-$REQUIREMENTS_HASH-$N" >> $GITHUB_OUTPUT diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 953ea826d32e4..2dd5fe3269726 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -109,8 +109,6 @@ jobs: python -m venv .venv source .venv/bin/activate source ./dev/install-common-deps.sh --ml - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' - uses: ./.github/actions/pipdeptree - name: Import check run: | @@ -353,8 +351,7 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e .[extras] - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark pip install mleap # Install Hugging Face datasets to test Hugging Face usage with MLflow dataset tracking pip install datasets diff --git a/.github/workflows/recipe.yml b/.github/workflows/recipe.yml index 2ed6b04ead32a..a3565e42e72f3 100644 --- a/.github/workflows/recipe.yml +++ b/.github/workflows/recipe.yml @@ -42,9 +42,7 @@ jobs: - name: Install dependencies run: | source ./dev/install-common-deps.sh - pip install -e . - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark - name: Run tests run: | pytest tests/recipes @@ -70,8 +68,7 @@ jobs: pip install -r requirements/test-requirements.txt pip install --no-dependencies tests/resources/mlflow-test-plugin pip install -e . - # pyspark 3.5 is incompatible with delta 2.4 - pip install 'pyspark<3.5' + pip install pyspark # TODO: Importing datasets in a pandas UDF (created by mlflow.pyfunc.spark_udf) crashes # the Python worker. To avoid this, uninstall `datasets`. This is a temporary workaround. 
pip uninstall -y datasets diff --git a/mlflow/utils/_spark_utils.py b/mlflow/utils/_spark_utils.py index 239ce113f3763..80a2b417deedc 100644 --- a/mlflow/utils/_spark_utils.py +++ b/mlflow/utils/_spark_utils.py @@ -46,7 +46,7 @@ def _create_local_spark_session_for_recipes(): _prepare_subprocess_environ_for_creating_local_spark_session() return ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_delta_dataset_source.py b/tests/data/test_delta_dataset_source.py index e9d8e4f9f766e..b199e558b1d74 100644 --- a/tests/data/test_delta_dataset_source.py +++ b/tests/data/test_delta_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_pandas_dataset.py b/tests/data/test_pandas_dataset.py index 2a452593e3e26..c8244eabbb2a7 100644 --- a/tests/data/test_pandas_dataset.py +++ b/tests/data/test_pandas_dataset.py @@ -24,7 +24,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset.py b/tests/data/test_spark_dataset.py index ae2fc350587b4..2283f4bf30ca9 100644 --- a/tests/data/test_spark_dataset.py +++ b/tests/data/test_spark_dataset.py @@ -21,7 +21,7 @@ def spark_session(tmp_path): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/data/test_spark_dataset_source.py b/tests/data/test_spark_dataset_source.py index 7b68370f2696f..78c0e0ccfee9b 100644 --- a/tests/data/test_spark_dataset_source.py +++ b/tests/data/test_spark_dataset_source.py @@ -14,7 +14,7 @@ def spark_session(): with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_ingest_step.py b/tests/recipes/test_ingest_step.py index a5799d8f74131..4da8f2f9f5f4d 100644 --- a/tests/recipes/test_ingest_step.py +++ b/tests/recipes/test_ingest_step.py @@ -35,7 +35,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + 
.config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" diff --git a/tests/recipes/test_predict_step.py b/tests/recipes/test_predict_step.py index 60bb8d3846879..987bfabfc5298 100644 --- a/tests/recipes/test_predict_step.py +++ b/tests/recipes/test_predict_step.py @@ -28,7 +28,7 @@ def spark_session(): with tempfile.TemporaryDirectory() as tmpdir: with ( SparkSession.builder.master("local[*]") - .config("spark.jars.packages", "io.delta:delta-core_2.12:2.4.0") + .config("spark.jars.packages", "io.delta:delta-spark_2.12:3.0.0") .config("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension") .config( "spark.sql.catalog.spark_catalog", "org.apache.spark.sql.delta.catalog.DeltaCatalog" From 688e74b9b76d925b7ab86afc2f7189472beeb777 Mon Sep 17 00:00:00 2001 From: Harutaka Kawamura Date: Thu, 26 Oct 2023 12:02:15 +0900 Subject: [PATCH 097/101] Run lint job on push to master (#10151) Signed-off-by: harupy --- .github/workflows/master.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/master.yml b/.github/workflows/master.yml index 2dd5fe3269726..a38597b7000c4 100644 --- a/.github/workflows/master.yml +++ b/.github/workflows/master.yml @@ -32,7 +32,7 @@ jobs: lint: runs-on: ubuntu-latest timeout-minutes: 30 - if: github.event_name == 'pull_request' && github.event.pull_request.draft == false + if: github.event_name != 'pull_request' && github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 - uses: ./.github/actions/untracked From bfc1959dc854da484eb314c96f7303ba29f593b7 Mon Sep 17 00:00:00 2001 From: Zac Davies <80654433+zacdav-db@users.noreply.github.com> Date: Thu, 26 Oct 2023 14:22:35 +1100 Subject: [PATCH 098/101] Adjustments to config retrieval when schema is Databricks (#10117) Signed-off-by: Zac Davies Co-authored-by: Zac Davies --- mlflow/R/mlflow/R/databricks-utils.R | 47 ++++++++++++++++++++-------- mlflow/R/mlflow/R/tracking-client.R | 2 +- 2 files changed, 35 insertions(+), 14 deletions(-) diff --git a/mlflow/R/mlflow/R/databricks-utils.R b/mlflow/R/mlflow/R/databricks-utils.R index aeb5a79a9a67c..5f1139ed5e27a 100644 --- a/mlflow/R/mlflow/R/databricks-utils.R +++ b/mlflow/R/mlflow/R/databricks-utils.R @@ -75,27 +75,48 @@ get_databricks_config_from_env <- function() { } get_databricks_config <- function(profile) { - config <- if (!is.na(profile)) { - get_databricks_config_for_profile(profile) - } else if (exists("spark.databricks.token") && exists("spark.databricks.api.url")) { + + # If a profile is provided, fetch its configuration + if (!is.na(profile)) { + config <- get_databricks_config_for_profile(profile) + if (databricks_config_is_valid(config)) { + return(config) + } + } + + # Check for environment variables + config <- get_databricks_config_from_env() + if (databricks_config_is_valid(config)) { + return(config) + } + + # Check 'DEFAULT' profile + config <- tryCatch({ + get_databricks_config_for_profile("DEFAULT") + }, error = function(e) { + # On error assume known invalid config + list(host = NA, token = NA, username = NA, password = NA) + }) + if (databricks_config_is_valid(config)) { + return(config) + } + + # When in Databricks (done last so other methods are explicit overrides) + if (exists("spark.databricks.token", envir = .GlobalEnv) && + exists("spark.databricks.api.url", envir = .GlobalEnv)) { config_vars <- list( host = 
get("spark.databricks.api.url", envir = .GlobalEnv), token = get("spark.databricks.token", envir = .GlobalEnv), insecure = Sys.getenv(config_variable_map$insecure, "False") ) - new_databricks_config(config_source = "db_dynamic", config_vars = config_vars) - } else { - config <- get_databricks_config_from_env() + config <- new_databricks_config(config_source = "db_dynamic", config_vars = config_vars) if (databricks_config_is_valid(config)) { - config - } else { - get_databricks_config_for_profile("DEFAULT") + return(config) } } - if (!databricks_config_is_valid(config)) { - stop("Could not find valid Databricks configuration.") - } - config + + # If no valid configuration is found by this point, raise an error + stop("Could not find valid Databricks configuration.") } #' Get information from Databricks Notebook environment diff --git a/mlflow/R/mlflow/R/tracking-client.R b/mlflow/R/mlflow/R/tracking-client.R index 56461a42961fd..4c0d8aa7157d3 100644 --- a/mlflow/R/mlflow/R/tracking-client.R +++ b/mlflow/R/mlflow/R/tracking-client.R @@ -28,7 +28,7 @@ new_mlflow_client_impl <- function(get_host_creds, get_cli_env = list, class = c ) } -new_mlflow_host_creds <- function( host = NA, username = NA, password = NA, token = NA, +new_mlflow_host_creds <- function(host = NA, username = NA, password = NA, token = NA, insecure = "False") { insecure_arg <- if (is.null(insecure) || is.na(insecure)) { "False" From 0d66ba2350a9869ec9a903dd90f85ce9e06a4fb6 Mon Sep 17 00:00:00 2001 From: sagarsumant Date: Wed, 25 Oct 2023 20:54:37 -0700 Subject: [PATCH 099/101] Introduce experimental flag "synchronous" to facilitate asynchronous logging (metrics/params/tags) in mlflow. (#9705) Signed-off-by: Sagar Sumant Signed-off-by: sagarsumant Signed-off-by: chenmoneygithub Co-authored-by: Sagar Sumant Co-authored-by: Harutaka Kawamura Co-authored-by: chenmoneygithub --- docs/source/python_api/mlflow.utils.rst | 10 + mlflow/__init__.py | 1 + mlflow/store/tracking/abstract_store.py | 66 +++- mlflow/tracking/_tracking_service/client.py | 99 +++++- mlflow/tracking/client.py | 76 ++++- mlflow/tracking/fluent.py | 106 +++++- mlflow/utils/async_logging/__init__.py | 1 + .../async_logging/async_logging_queue.py | 230 +++++++++++++ mlflow/utils/async_logging/run_batch.py | 33 ++ mlflow/utils/async_logging/run_operations.py | 54 ++++ .../test_async_logging_integration.py | 229 +++++++++++++ tests/tracking/fluent/test_fluent.py | 80 +++++ tests/tracking/test_client.py | 37 +++ tests/utils/test_async_logging_queue.py | 306 ++++++++++++++++++ 14 files changed, 1283 insertions(+), 45 deletions(-) create mode 100644 docs/source/python_api/mlflow.utils.rst create mode 100644 mlflow/utils/async_logging/__init__.py create mode 100644 mlflow/utils/async_logging/async_logging_queue.py create mode 100644 mlflow/utils/async_logging/run_batch.py create mode 100644 mlflow/utils/async_logging/run_operations.py create mode 100644 tests/integration/async_logging/test_async_logging_integration.py create mode 100644 tests/utils/test_async_logging_queue.py diff --git a/docs/source/python_api/mlflow.utils.rst b/docs/source/python_api/mlflow.utils.rst new file mode 100644 index 0000000000000..f45e107f91996 --- /dev/null +++ b/docs/source/python_api/mlflow.utils.rst @@ -0,0 +1,10 @@ +mlflow.utils +================== + +.. automodule:: mlflow.utils.async_logging + :members: + :undoc-members: + +.. 
automodule:: mlflow.utils.async_logging.run_operations + :members: + :undoc-members: \ No newline at end of file diff --git a/mlflow/__init__.py b/mlflow/__init__.py index 8f57ba5e66872..5b0c1689f5716 100644 --- a/mlflow/__init__.py +++ b/mlflow/__init__.py @@ -148,6 +148,7 @@ set_tags, start_run, ) +from mlflow.utils.async_logging.run_operations import RunOperations # noqa: F401 from mlflow.utils.credentials import login __all__ = [ diff --git a/mlflow/store/tracking/abstract_store.py b/mlflow/store/tracking/abstract_store.py index 69de91f8c7538..172aeeacb0cca 100644 --- a/mlflow/store/tracking/abstract_store.py +++ b/mlflow/store/tracking/abstract_store.py @@ -5,6 +5,8 @@ from mlflow.store.entities.paged_list import PagedList from mlflow.store.tracking import SEARCH_MAX_RESULTS_DEFAULT from mlflow.utils.annotations import developer_stable, experimental +from mlflow.utils.async_logging.async_logging_queue import AsyncLoggingQueue +from mlflow.utils.async_logging.run_operations import RunOperations @developer_stable @@ -21,7 +23,7 @@ def __init__(self): Empty constructor for now. This is deliberately not marked as abstract, else every derived class would be forced to create one. """ - pass + self._async_logging_queue = AsyncLoggingQueue(logging_func=self.log_batch) @abstractmethod def search_experiments( @@ -219,6 +221,15 @@ def log_metric(self, run_id, metric): """ self.log_batch(run_id, metrics=[metric], params=[], tags=[]) + def log_metric_async(self, run_id, metric) -> RunOperations: + """ + Log a metric for the specified run in async fashion. + + :param run_id: String id for the run + :param metric: :py:class:`mlflow.entities.Metric` instance to log + """ + return self.log_batch_async(run_id, metrics=[metric], params=[], tags=[]) + def log_param(self, run_id, param): """ Log a param for the specified run @@ -228,6 +239,15 @@ def log_param(self, run_id, param): """ self.log_batch(run_id, metrics=[], params=[param], tags=[]) + def log_param_async(self, run_id, param) -> RunOperations: + """ + Log a param for the specified run in async fashion. + + :param run_id: String id for the run + :param param: :py:class:`mlflow.entities.Param` instance to log + """ + return self.log_batch_async(run_id, metrics=[], params=[param], tags=[]) + def set_experiment_tag(self, experiment_id, tag): """ Set a tag for the specified experiment @@ -246,6 +266,15 @@ def set_tag(self, run_id, tag): """ self.log_batch(run_id, metrics=[], params=[], tags=[tag]) + def set_tag_async(self, run_id, tag) -> RunOperations: + """ + Set a tag for the specified run in async fashion. + + :param run_id: String id for the run + :param tag: :py:class:`mlflow.entities.RunTag` instance to set + """ + return self.log_batch_async(run_id, metrics=[], params=[], tags=[tag]) + @abstractmethod def get_metric_history(self, run_id, metric_key, max_results=None, page_token=None): """ @@ -296,13 +325,24 @@ def search_runs( meaningful in such cases. 
""" runs, token = self._search_runs( - experiment_ids, filter_string, run_view_type, max_results, order_by, page_token + experiment_ids, + filter_string, + run_view_type, + max_results, + order_by, + page_token, ) return PagedList(runs, token) @abstractmethod def _search_runs( - self, experiment_ids, filter_string, run_view_type, max_results, order_by, page_token + self, + experiment_ids, + filter_string, + run_view_type, + max_results, + order_by, + page_token, ): """ Return runs that match the given list of search expressions within the experiments, as @@ -332,6 +372,26 @@ def log_batch(self, run_id, metrics, params, tags): """ pass + def log_batch_async(self, run_id, metrics, params, tags) -> RunOperations: + """ + Log multiple metrics, params, and tags for the specified run in async fashion. + This API does not offer immediate consistency of the data. When API returns, + data is accepted but not persisted/processed by back end. Data would be processed + in near real time fashion. + + :param run_id: String id for the run + :param metrics: List of :py:class:`mlflow.entities.Metric` instances to log + :param params: List of :py:class:`mlflow.entities.Param` instances to log + :param tags: List of :py:class:`mlflow.entities.RunTag` instances to log + :return: None. + """ + if not self._async_logging_queue.is_active(): + self._async_logging_queue.activate() + + return self._async_logging_queue.log_batch_async( + run_id=run_id, metrics=metrics, params=params, tags=tags + ) + @abstractmethod def record_logged_model(self, run_id, mlflow_model): """ diff --git a/mlflow/tracking/_tracking_service/client.py b/mlflow/tracking/_tracking_service/client.py index eb374d101c4c0..da43ee35a39da 100644 --- a/mlflow/tracking/_tracking_service/client.py +++ b/mlflow/tracking/_tracking_service/client.py @@ -18,6 +18,7 @@ from mlflow.tracking._tracking_service import utils from mlflow.tracking.metric_value_conversion_utils import convert_metric_value_to_float_if_possible from mlflow.utils import chunk_list +from mlflow.utils.async_logging.run_operations import RunOperations, get_combined_run_operations from mlflow.utils.mlflow_tags import MLFLOW_USER from mlflow.utils.string_utils import is_string_type from mlflow.utils.time import get_current_time_millis @@ -261,7 +262,9 @@ def rename_experiment(self, experiment_id, new_name): """ self.store.rename_experiment(experiment_id, new_name) - def log_metric(self, run_id, key, value, timestamp=None, step=None): + def log_metric( + self, run_id, key, value, timestamp=None, step=None, synchronous=True + ) -> Optional[RunOperations]: """ Log a metric against the run ID. @@ -278,21 +281,47 @@ def log_metric(self, run_id, key, value, timestamp=None, step=None): may support larger values. :param timestamp: Time when this metric was calculated. Defaults to the current system time. :param step: Training step (iteration) at which was the metric calculated. Defaults to 0. + :param synchronous: *Experimental* If True, blocks until the metrics is logged + successfully. If False, logs the metrics asynchronously and + returns a future representing the logging operation. + + :return: When synchronous=True, returns None. + When synchronous=False, returns :py:class:`mlflow.RunOperations` that represents + future for logging operation. 
+ """ timestamp = timestamp if timestamp is not None else get_current_time_millis() step = step if step is not None else 0 metric_value = convert_metric_value_to_float_if_possible(value) metric = Metric(key, metric_value, timestamp, step) - self.store.log_metric(run_id, metric) + if synchronous: + self.store.log_metric(run_id, metric) + else: + return self.store.log_metric_async(run_id, metric) - def log_param(self, run_id, key, value): + def log_param(self, run_id, key, value, synchronous=True): """ Log a parameter (e.g. model hyperparameter) against the run ID. Value is converted to a string. + + :param run_id: ID of the run to log the parameter against. + :param key: Name of the parameter. + :param value: Value of the parameter. + :param synchronous: *Experimental* If True, blocks until the parameters are logged + successfully. If False, logs the parameters asynchronously and + returns a future representing the logging operation. + + :return: When synchronous=True, returns parameter value. + When synchronous=False, returns :py:class:`mlflow.RunOperations` that + represents future for logging operation. """ param = Param(key, str(value)) try: - self.store.log_param(run_id, param) + if synchronous: + self.store.log_param(run_id, param) + return value + else: + return self.store.log_param_async(run_id, param) except MlflowException as e: if e.error_code == ErrorCode.Name(INVALID_PARAMETER_VALUE): msg = f"{e.message}{PARAM_VALIDATION_MSG}" @@ -311,7 +340,7 @@ def set_experiment_tag(self, experiment_id, key, value): tag = ExperimentTag(key, str(value)) self.store.set_experiment_tag(experiment_id, tag) - def set_tag(self, run_id, key, value): + def set_tag(self, run_id, key, value, synchronous=True) -> Optional[RunOperations]: """ Set a tag on the run with the specified ID. Value is converted to a string. @@ -323,9 +352,20 @@ def set_tag(self, run_id, key, value): :param value: Tag value (string, but will be string-ified if not). All backend stores will support values up to length 5000, but some may support larger values. + :param synchronous: *Experimental* If True, blocks until the tag is logged + successfully. If False, logs the tag asynchronously and + returns a future representing the logging operation. + + :return: When synchronous=True, returns None. + When synchronous=False, returns :py:class:`mlflow.RunOperations` object + that represents future for logging operation. + """ tag = RunTag(key, str(value)) - self.store.set_tag(run_id, tag) + if synchronous: + self.store.set_tag(run_id, tag) + else: + return self.store.set_tag_async(run_id, tag) def delete_tag(self, run_id, key): """ @@ -359,7 +399,9 @@ def update_run(self, run_id, status=None, name=None): run_name=name, ) - def log_batch(self, run_id, metrics=(), params=(), tags=()): + def log_batch( + self, run_id, metrics=(), params=(), tags=(), synchronous=True + ) -> Optional[RunOperations]: """ Log multiple metrics, params, and/or tags. @@ -367,9 +409,15 @@ def log_batch(self, run_id, metrics=(), params=(), tags=()): :param metrics: If provided, List of Metric(key, value, timestamp) instances. :param params: If provided, List of Param(key, value) instances. :param tags: If provided, List of RunTag(key, value) instances. + :param synchronous: *Experimental* If True, blocks until the metrics/tags/params are logged + successfully. If False, logs the metrics/tags/params asynchronously + and returns a future representing the logging operation. Raises an MlflowException if any errors occur. 
- :return: None + + :return: When synchronous=True, returns None. + When synchronous=False, returns :py:class:`mlflow.RunOperations` that + represents future for logging operation. """ if len(metrics) == 0 and len(params) == 0 and len(tags) == 0: return @@ -377,6 +425,12 @@ def log_batch(self, run_id, metrics=(), params=(), tags=()): param_batches = chunk_list(params, MAX_PARAMS_TAGS_PER_BATCH) tag_batches = chunk_list(tags, MAX_PARAMS_TAGS_PER_BATCH) + # When given data is split into one or more batches, we need to wait for all the batches. + # Each batch logged returns run_operations which we append to this list + # At the end we merge all the run_operations into a single run_operations object and return. + # Applicable only when synchronous is False + run_operations_list = [] + for params_batch, tags_batch in zip_longest(param_batches, tag_batches, fillvalue=[]): metrics_batch_size = min( MAX_ENTITIES_PER_BATCH - len(params_batch) - len(tags_batch), @@ -386,12 +440,33 @@ def log_batch(self, run_id, metrics=(), params=(), tags=()): metrics_batch = metrics[:metrics_batch_size] metrics = metrics[metrics_batch_size:] - self.store.log_batch( - run_id=run_id, metrics=metrics_batch, params=params_batch, tags=tags_batch - ) + if synchronous: + self.store.log_batch( + run_id=run_id, metrics=metrics_batch, params=params_batch, tags=tags_batch + ) + else: + run_operations_list.append( + self.store.log_batch_async( + run_id=run_id, + metrics=metrics_batch, + params=params_batch, + tags=tags_batch, + ) + ) for metrics_batch in chunk_list(metrics, chunk_size=MAX_METRICS_PER_BATCH): - self.store.log_batch(run_id=run_id, metrics=metrics_batch, params=[], tags=[]) + if synchronous: + self.store.log_batch(run_id=run_id, metrics=metrics_batch, params=[], tags=[]) + else: + run_operations_list.append( + self.store.log_batch_async( + run_id=run_id, metrics=metrics_batch, params=[], tags=[] + ) + ) + + if not synchronous: + # Merge all the run operations into a single run operations object + return get_combined_run_operations(run_operations_list) def log_inputs(self, run_id: str, datasets: Optional[List[DatasetInput]] = None): """ diff --git a/mlflow/tracking/client.py b/mlflow/tracking/client.py index 8fb6d9df6e7f2..f8eb1e5ee6aac 100644 --- a/mlflow/tracking/client.py +++ b/mlflow/tracking/client.py @@ -38,6 +38,7 @@ from mlflow.tracking.artifact_utils import _upload_artifacts_to_databricks from mlflow.tracking.registry import UnsupportedModelRegistryStoreURIException from mlflow.utils.annotations import experimental +from mlflow.utils.async_logging.run_operations import RunOperations from mlflow.utils.databricks_utils import get_databricks_run_url from mlflow.utils.logging_utils import eprint from mlflow.utils.mlflow_tags import ( @@ -693,7 +694,8 @@ def log_metric( value: float, timestamp: Optional[int] = None, step: Optional[int] = None, - ) -> None: + synchronous: bool = True, + ) -> Optional[RunOperations]: """ Log a metric against the run ID. @@ -710,6 +712,13 @@ def log_metric( :param timestamp: Time when this metric was calculated. Defaults to the current system time. :param step: Integer training step (iteration) at which was the metric calculated. Defaults to 0. + :param synchronous: *Experimental* If True, blocks until the metric is logged successfully. + If False, logs the metric asynchronously and returns a future + representing the logging operation. + + :return: When `synchronous=True`, returns None. 
When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. code-block:: python :caption: Example @@ -740,6 +749,9 @@ def print_run_info(r): run = client.get_run(run.info.run_id) print_run_info(run) + # To log metric in async fashion + client.log_metric(run.info.run_id, "m", 1.5, synchronous=False) + .. code-block:: text :caption: Output @@ -751,9 +763,13 @@ def print_run_info(r): metrics: {'m': 1.5} status: FINISHED """ - self._tracking_client.log_metric(run_id, key, value, timestamp, step) + return self._tracking_client.log_metric( + run_id, key, value, timestamp, step, synchronous=synchronous + ) - def log_param(self, run_id: str, key: str, value: Any) -> Any: + def log_param( + self, run_id: str, key: str, value: Any, synchronous: Optional[bool] = True + ) -> Any: """ Log a parameter (e.g. model hyperparameter) against the run ID. @@ -765,7 +781,13 @@ def log_param(self, run_id: str, key: str, value: Any) -> Any: :param value: Parameter value (string, but will be string-ified if not). All built-in backend stores support values up to length 6000, but some may support larger values. - :return: the parameter value that is logged. + :param synchronous: *Experimental* If True, blocks until the parameter is logged + successfully. If False, logs the parameter asynchronously and + returns a future representing the logging operation. + + :return: When `synchronous=True`, returns parameter value. When `synchronous=False`, + returns an :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` + instance that represents future for logging operation. .. code-block:: python :caption: Example @@ -808,8 +830,11 @@ def print_run_info(r): params: {'p': '1'} status: FINISHED """ - self._tracking_client.log_param(run_id, key, value) - return value + if synchronous: + self._tracking_client.log_param(run_id, key, value, synchronous=True) + return value + else: + return self._tracking_client.log_param(run_id, key, value, synchronous=False) def set_experiment_tag(self, experiment_id: str, key: str, value: Any) -> None: """ @@ -842,7 +867,9 @@ def set_experiment_tag(self, experiment_id: str, key: str, value: Any) -> None: """ self._tracking_client.set_experiment_tag(experiment_id, key, value) - def set_tag(self, run_id: str, key: str, value: Any) -> None: + def set_tag( + self, run_id: str, key: str, value: Any, synchronous: bool = True + ) -> Optional[RunOperations]: """ Set a tag on the run with the specified ID. Value is converted to a string. @@ -854,6 +881,13 @@ def set_tag(self, run_id: str, key: str, value: Any) -> None: :param value: Tag value (string, but will be string-ified if not). All backend stores will support values up to length 5000, but some may support larger values. + :param synchronous: *Experimental* If True, blocks until the tag is logged successfully. + If False, logs the tag asynchronously and returns a future + representing the logging operation. + + :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. 
code-block:: python :caption: Example @@ -887,7 +921,7 @@ def print_run_info(run): run_id: 4f226eb5758145e9b28f78514b59a03b Tags: {'nlp.framework': 'Spark NLP'} """ - self._tracking_client.set_tag(run_id, key, value) + return self._tracking_client.set_tag(run_id, key, value, synchronous=synchronous) def delete_tag(self, run_id: str, key: str) -> None: """ @@ -986,7 +1020,8 @@ def log_batch( metrics: Sequence[Metric] = (), params: Sequence[Param] = (), tags: Sequence[RunTag] = (), - ) -> None: + synchronous: bool = True, + ) -> Optional[RunOperations]: """ Log multiple metrics, params, and/or tags. @@ -994,9 +1029,15 @@ def log_batch( :param metrics: If provided, List of Metric(key, value, timestamp) instances. :param params: If provided, List of Param(key, value) instances. :param tags: If provided, List of RunTag(key, value) instances. + :param synchronous: *Experimental* If True, blocks until the metrics/tags/params are logged + successfully. If False, logs the metrics/tags/params asynchronously + and returns a future representing the logging operation. Raises an MlflowException if any errors occur. - :return: None + + :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. code-block:: python :caption: Example @@ -1030,6 +1071,9 @@ def print_run_info(r): run = client.get_run(run.info.run_id) print_run_info(run) + # To log metric in async fashion + client.log_metric(run.info.run_id, "m", 1.5, synchronous=False) + .. code-block:: text :caption: Output @@ -1039,7 +1083,9 @@ def print_run_info(r): tags: {'t': 't'} status: FINISHED """ - self._tracking_client.log_batch(run_id, metrics, params, tags) + return self._tracking_client.log_batch( + run_id, metrics, params, tags, synchronous=synchronous + ) @experimental def log_inputs( @@ -1673,7 +1719,9 @@ def get_artifact_data(run): if artifact_file in artifacts: with tempfile.TemporaryDirectory() as tmpdir: downloaded_artifact_path = mlflow.artifacts.download_artifacts( - run_id=run_id, artifact_path=artifact_file, dst_path=tmpdir + run_id=run_id, + artifact_path=artifact_file, + dst_path=tmpdir, ) existing_predictions = pd.read_json(downloaded_artifact_path, orient="split") if extra_columns is not None: @@ -3118,8 +3166,8 @@ def search_model_versions( ) def get_model_version_stages( - self, name: str, version: str # pylint: disable=unused-argument - ) -> List[str]: + self, name: str, version: str + ) -> List[str]: # pylint: disable=unused-argument """ :return: A list of valid stages. 
diff --git a/mlflow/tracking/fluent.py b/mlflow/tracking/fluent.py index cafccf6e6bc94..af21aa0163a13 100644 --- a/mlflow/tracking/fluent.py +++ b/mlflow/tracking/fluent.py @@ -42,6 +42,7 @@ from mlflow.tracking.default_experiment import registry as default_experiment_registry from mlflow.utils import get_results_from_paginated_fn from mlflow.utils.annotations import experimental +from mlflow.utils.async_logging.run_operations import RunOperations from mlflow.utils.autologging_utils import ( AUTOLOGGING_CONF_KEY_IS_GLOBALLY_CONFIGURED, AUTOLOGGING_INTEGRATIONS, @@ -70,6 +71,7 @@ import PIL import plotly + _active_run_stack = [] run_id_to_system_metrics_monitor = {} _active_experiment_id = None @@ -596,7 +598,7 @@ def get_parent_run(run_id: str) -> Optional[Run]: return MlflowClient().get_parent_run(run_id) -def log_param(key: str, value: Any) -> Any: +def log_param(key: str, value: Any, synchronous: bool = True) -> Any: """ Log a parameter (e.g. model hyperparameter) under the current run. If no run is active, this method will create a new active run. @@ -608,8 +610,13 @@ def log_param(key: str, value: Any) -> Any: :param value: Parameter value (string, but will be string-ified if not). All built-in backend stores support values up to length 6000, but some may support larger values. + :param synchronous: *Experimental* If True, blocks until the parameter is logged + successfully. If False, logs the parameter asynchronously and + returns a future representing the logging operation. - :return: the parameter value that is logged. + :return: When `synchronous=True`, returns parameter value. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. testcode:: python :caption: Example @@ -619,9 +626,10 @@ def log_param(key: str, value: Any) -> Any: with mlflow.start_run(): value = mlflow.log_param("learning_rate", 0.01) assert value == 0.01 + value = mlflow.log_param("learning_rate", 0.02, synchronous=False) """ run_id = _get_or_start_run().info.run_id - return MlflowClient().log_param(run_id, key, value) + return MlflowClient().log_param(run_id, key, value, synchronous=synchronous) def set_experiment_tag(key: str, value: Any) -> None: @@ -648,7 +656,7 @@ def set_experiment_tag(key: str, value: Any) -> None: MlflowClient().set_experiment_tag(experiment_id, key, value) -def set_tag(key: str, value: Any) -> None: +def set_tag(key: str, value: Any, synchronous: bool = True) -> Optional[RunOperations]: """ Set a tag under the current run. If no run is active, this method will create a new active run. @@ -660,17 +668,29 @@ def set_tag(key: str, value: Any) -> None: :param value: Tag value (string, but will be string-ified if not). All backend stores will support values up to length 5000, but some may support larger values. + :param synchronous: *Experimental* If True, blocks until the tag is logged + successfully. If False, logs the tag asynchronously and + returns a future representing the logging operation. + + :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. testcode:: python :caption: Example import mlflow + # Set a tag. with mlflow.start_run(): mlflow.set_tag("release.version", "2.2.0") + + # Set a tag in async fashion. 
+    with mlflow.start_run():
+        mlflow.set_tag("release.version", "2.2.1", synchronous=False)
     """
     run_id = _get_or_start_run().info.run_id
-    MlflowClient().set_tag(run_id, key, value)
+    return MlflowClient().set_tag(run_id, key, value, synchronous=synchronous)
 
 
 def delete_tag(key: str) -> None:
@@ -697,7 +717,9 @@ def delete_tag(key: str) -> None:
     MlflowClient().delete_tag(run_id, key)
 
 
-def log_metric(key: str, value: float, step: Optional[int] = None) -> None:
+def log_metric(
+    key: str, value: float, step: Optional[int] = None, synchronous: bool = True
+) -> Optional[RunOperations]:
     """
     Log a metric under the current run. If no run is active, this method will create a new
     active run.
@@ -712,20 +734,36 @@ def log_metric(key: str, value: float, step: Optional[int] = None) -> None:
         All backend stores will support values up to length 5000, but some
         may support larger values.
     :param step: Metric step (int). Defaults to zero if unspecified.
+    :param synchronous: *Experimental* If True, blocks until the metric is logged
+                        successfully. If False, logs the metric asynchronously and
+                        returns a future representing the logging operation.
+
+    :return: When `synchronous=True`, returns None.
+             When `synchronous=False`, returns a `RunOperations` instance that represents a
+             future for the logging operation.
 
     .. testcode:: python
        :caption: Example
 
        import mlflow
 
+       # Log a metric
        with mlflow.start_run():
            mlflow.log_metric("mse", 2500.00)
+
+       # Log a metric in async fashion.
+       with mlflow.start_run():
+           mlflow.log_metric("mse", 2500.00, synchronous=False)
    """
    run_id = _get_or_start_run().info.run_id
-    MlflowClient().log_metric(run_id, key, value, get_current_time_millis(), step or 0)
+    return MlflowClient().log_metric(
+        run_id, key, value, get_current_time_millis(), step or 0, synchronous=synchronous
+    )
 
 
-def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
+def log_metrics(
+    metrics: Dict[str, float], step: Optional[int] = None, synchronous: bool = True
+) -> Optional[RunOperations]:
    """
    Log multiple metrics for the current run. If no run is active, this method will create a new
    active run.
@@ -737,7 +775,13 @@ def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
    :param step: A single integer step at which to log the specified
                 Metrics. If unspecified, each metric is logged at step zero.
 
-    :returns: None
+    :param synchronous: *Experimental* If True, blocks until the metrics are logged
+                        successfully. If False, logs the metrics asynchronously and
+                        returns a future representing the logging operation.
+
+    :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an
+             :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that
+             represents a future for the logging operation.
 
    .. testcode:: python
        :caption: Example
 
@@ -749,21 +793,33 @@ def log_metrics(metrics: Dict[str, float], step: Optional[int] = None) -> None:
        # Log a batch of metrics
        with mlflow.start_run():
            mlflow.log_metrics(metrics)
+
+        # Log a batch of metrics in async fashion.
+ with mlflow.start_run(): + mlflow.log_metrics(metrics, synchronous=False) """ run_id = _get_or_start_run().info.run_id timestamp = get_current_time_millis() metrics_arr = [Metric(key, value, timestamp, step or 0) for key, value in metrics.items()] - MlflowClient().log_batch(run_id=run_id, metrics=metrics_arr, params=[], tags=[]) + return MlflowClient().log_batch( + run_id=run_id, metrics=metrics_arr, params=[], tags=[], synchronous=synchronous + ) -def log_params(params: Dict[str, Any]) -> None: +def log_params(params: Dict[str, Any], synchronous: bool = True) -> Optional[RunOperations]: """ Log a batch of params for the current run. If no run is active, this method will create a new active run. :param params: Dictionary of param_name: String -> value: (String, but will be string-ified if not) - :returns: None + :param synchronous: *Experimental* If True, blocks until the parameters are logged + successfully. If False, logs the parameters asynchronously and + returns a future representing the logging operation. + + :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. testcode:: python :caption: Example @@ -775,10 +831,16 @@ def log_params(params: Dict[str, Any]) -> None: # Log a batch of parameters with mlflow.start_run(): mlflow.log_params(params) + + # Log a batch of parameters in async fashion. + with mlflow.start_run(): + mlflow.log_params(params, synchronous=False) """ run_id = _get_or_start_run().info.run_id params_arr = [Param(key, str(value)) for key, value in params.items()] - MlflowClient().log_batch(run_id=run_id, metrics=[], params=params_arr, tags=[]) + return MlflowClient().log_batch( + run_id=run_id, metrics=[], params=params_arr, tags=[], synchronous=synchronous + ) @experimental @@ -844,14 +906,20 @@ def set_experiment_tags(tags: Dict[str, Any]) -> None: set_experiment_tag(key, value) -def set_tags(tags: Dict[str, Any]) -> None: +def set_tags(tags: Dict[str, Any], synchronous: bool = True) -> Optional[RunOperations]: """ Log a batch of tags for the current run. If no run is active, this method will create a new active run. :param tags: Dictionary of tag_name: String -> value: (String, but will be string-ified if not) - :returns: None + :param synchronous: *Experimental* If True, blocks until the tag is logged + successfully. If False, logs the tag asynchronously and + returns a future representing the logging operation. + + :return: When `synchronous=True`, returns None. When `synchronous=False`, returns an + :py:class:`mlflow.utils.async_logging.run_operations.RunOperations` instance that + represents future for logging operation. .. testcode:: python :caption: Example @@ -867,10 +935,16 @@ def set_tags(tags: Dict[str, Any]) -> None: # Set a batch of tags with mlflow.start_run(): mlflow.set_tags(tags) + + # Set a batch of tags in async fashion. 
+    with mlflow.start_run():
+        mlflow.set_tags(tags, synchronous=False)
     """
     run_id = _get_or_start_run().info.run_id
     tags_arr = [RunTag(key, str(value)) for key, value in tags.items()]
-    MlflowClient().log_batch(run_id=run_id, metrics=[], params=[], tags=tags_arr)
+    return MlflowClient().log_batch(
+        run_id=run_id, metrics=[], params=[], tags=tags_arr, synchronous=synchronous
+    )
 
 
 def log_artifact(local_path: str, artifact_path: Optional[str] = None) -> None:
diff --git a/mlflow/utils/async_logging/__init__.py b/mlflow/utils/async_logging/__init__.py
new file mode 100644
index 0000000000000..245b74b4eebc6
--- /dev/null
+++ b/mlflow/utils/async_logging/__init__.py
@@ -0,0 +1 @@
+from mlflow.utils.async_logging import run_operations  # noqa: F401
diff --git a/mlflow/utils/async_logging/async_logging_queue.py b/mlflow/utils/async_logging/async_logging_queue.py
new file mode 100644
index 0000000000000..77579b75db853
--- /dev/null
+++ b/mlflow/utils/async_logging/async_logging_queue.py
@@ -0,0 +1,230 @@
+"""
+Defines an AsyncLoggingQueue that provides asynchronous logging of metrics/tags/params using a
+queue-based approach.
+"""
+
+import atexit
+import logging
+import threading
+from concurrent.futures import ThreadPoolExecutor
+from queue import Empty, Queue
+
+from mlflow.entities.metric import Metric
+from mlflow.entities.param import Param
+from mlflow.entities.run_tag import RunTag
+from mlflow.utils.async_logging.run_batch import RunBatch
+from mlflow.utils.async_logging.run_operations import RunOperations
+
+_logger = logging.getLogger(__name__)
+
+
+class AsyncLoggingQueue:
+    """
+    A queue-based run data processor that queues incoming batches and processes them using a
+    single worker thread.
+    """
+
+    def __init__(self, logging_func: callable([str, [Metric], [Param], [RunTag]])) -> None:
+        """Initializes an AsyncLoggingQueue object.
+
+        Args:
+            logging_func: A callable function that takes in four arguments: a string
+                representing the run_id, a list of Metric objects,
+                a list of Param objects, and a list of RunTag objects.
+        """
+        self._queue = Queue()
+        self._lock = threading.RLock()
+        self._logging_func = logging_func
+
+        self._is_activated = False
+
+    def _at_exit_callback(self) -> None:
+        """Callback function to be executed when the program is exiting.
+
+        Stops the data processing thread and waits for the queue to be drained. Finally, shuts down
+        the thread pools used for data logging and batch processing status checks.
+        """
+        try:
+            # Stop the data processing thread.
+            self._stop_data_logging_thread_event.set()
+            # Wait until the queue is drained.
+            self._run_data_logging_thread.result()
+            self._batch_logging_threadpool.shutdown(wait=False)
+            self._batch_status_check_threadpool.shutdown(wait=False)
+        except Exception as e:
+            _logger.error(f"Encountered error while trying to finish logging: {e}")
+
+    def _logging_loop(self) -> None:
+        """
+        Continuously logs run data until `self._stop_data_logging_thread_event` is set.
+        If an exception occurs during logging, an `MlflowException` is raised.
+        """
+        try:
+            while not self._stop_data_logging_thread_event.is_set():
+                self._log_run_data()
+        except Exception as e:
+            from mlflow.exceptions import MlflowException
+
+            raise MlflowException(f"Exception inside the run data logging thread: {e}")
+
+    def _log_run_data(self) -> None:
+        """Process the next batch of run data from the queue.
+
+        Retrieves the next batch of run data from the queue and processes it by calling
+        `self._logging_func` with the run ID, metrics, params, and tags in the batch. Once the
+        batch has been processed, its completion event is set. If an exception occurs during
+        processing, it is logged, stored on the batch, and the completion event is still set so
+        that waiters are unblocked. If the queue is empty, the call simply returns.
+
+        Returns: None
+        """
+        run_batch = None  # type: RunBatch
+        try:
+            run_batch = self._queue.get(timeout=1)
+        except Empty:
+            # Ignore empty queue exception
+            return
+        try:
+            self._logging_func(
+                run_id=run_batch.run_id,
+                metrics=run_batch.metrics,
+                params=run_batch.params,
+                tags=run_batch.tags,
+            )
+
+            # Signal the batch processing is done.
+            run_batch.completion_event.set()
+
+        except Exception as e:
+            _logger.error(f"Run Id {run_batch.run_id}: Failed to log run data: Exception: {e}")
+            run_batch.exception = e
+            run_batch.completion_event.set()
+
+    def _wait_for_batch(self, batch: RunBatch) -> None:
+        """Wait for the given batch to be processed by the logging thread.
+
+        Args:
+            batch: The batch to wait for.
+
+        Raises:
+            Exception: If an exception occurred while processing the batch.
+        """
+        batch.completion_event.wait()
+        if batch.exception:
+            raise batch.exception
+
+    def __getstate__(self):
+        """Return the state of the object for pickling.
+
+        This method is called by the `pickle` module when the object is being pickled. It returns a
+        dictionary containing the object's state, with non-picklable attributes removed.
+
+        Returns:
+            dict: A dictionary containing the object's state.
+        """
+        state = self.__dict__.copy()
+        del state["_queue"]
+        del state["_lock"]
+        del state["_is_activated"]
+
+        if "_run_data_logging_thread" in state:
+            del state["_run_data_logging_thread"]
+        if "_stop_data_logging_thread_event" in state:
+            del state["_stop_data_logging_thread_event"]
+        if "_batch_logging_threadpool" in state:
+            del state["_batch_logging_threadpool"]
+        if "_batch_status_check_threadpool" in state:
+            del state["_batch_status_check_threadpool"]
+
+        return state
+
+    def __setstate__(self, state):
+        """Set the state of the object from a given state dictionary.
+
+        Recreates placeholder values for the non-picklable attributes that were removed in
+        `self.__getstate__()`.
+
+        Args:
+            state (dict): A dictionary containing the state of the object.
+
+        Returns:
+            None
+        """
+        self.__dict__.update(state)
+        self._queue = Queue()
+        self._lock = threading.RLock()
+        self._is_activated = False
+        self._batch_logging_threadpool = None
+        self._batch_status_check_threadpool = None
+        self._stop_data_logging_thread_event = None
+
+    def log_batch_async(
+        self, run_id: str, params: [Param], tags: [RunTag], metrics: [Metric]
+    ) -> RunOperations:
+        """Asynchronously logs a batch of run data (parameters, tags, and metrics).
+
+        Args:
+            run_id (str): The ID of the run to log data for.
+            params (list[mlflow.entities.Param]): A list of parameters to log for the run.
+            tags (list[mlflow.entities.RunTag]): A list of tags to log for the run.
+            metrics (list[mlflow.entities.Metric]): A list of metrics to log for the run.
+
+        Returns:
+            mlflow.utils.async_logging.run_operations.RunOperations: An object that encapsulates
+            the asynchronous operation of logging the batch of run data.
+            The object contains a list of `concurrent.futures.Future` objects that can be used
+            to check the status of the operation and retrieve any exceptions
+            that occurred during the operation.
+        """
+        from mlflow import MlflowException
+
+        if not self._is_activated:
+            raise MlflowException("AsyncLoggingQueue is not activated.")
+        batch = RunBatch(
+            run_id=run_id,
+            params=params,
+            tags=tags,
+            metrics=metrics,
+            completion_event=threading.Event(),
+        )
+
+        self._queue.put(batch)
+
+        operation_future = self._batch_status_check_threadpool.submit(self._wait_for_batch, batch)
+        return RunOperations(operation_futures=[operation_future])
+
+    def is_active(self) -> bool:
+        return self._is_activated
+
+    def activate(self) -> None:
+        """Activates the async logging queue.
+
+        1. Initializes the queue-draining thread.
+        2. Initializes the thread pool used to check the status of logged batches.
+        3. Registers an atexit callback to ensure that any remaining log data
+           is flushed before the program exits.
+
+        If the queue is already activated, this method does nothing.
+        """
+        with self._lock:
+            if self._is_activated:
+                return
+
+            self._stop_data_logging_thread_event = threading.Event()
+
+            # Keep max_workers=1 so that a single worker thread drains the queue and batches
+            # are logged in the order they were enqueued.
+            self._batch_logging_threadpool = ThreadPoolExecutor(max_workers=1)
+
+            self._batch_status_check_threadpool = ThreadPoolExecutor(max_workers=10)
+
+            self._run_data_logging_thread = self._batch_logging_threadpool.submit(
+                self._logging_loop
+            )  # concurrent.futures.Future[self._logging_loop]
+
+            atexit.register(self._at_exit_callback)
+
+            self._is_activated = True
diff --git a/mlflow/utils/async_logging/run_batch.py b/mlflow/utils/async_logging/run_batch.py
new file mode 100644
index 0000000000000..de89ecf1104ae
--- /dev/null
+++ b/mlflow/utils/async_logging/run_batch.py
@@ -0,0 +1,33 @@
+import threading
+from typing import List
+
+from mlflow.entities.metric import Metric
+from mlflow.entities.param import Param
+from mlflow.entities.run_tag import RunTag
+
+
+class RunBatch:
+    def __init__(
+        self,
+        run_id: str,
+        params: List[Param],
+        tags: List[RunTag],
+        metrics: List[Metric],
+        completion_event: threading.Event,
+    ) -> None:
+        """
+        Initializes an instance of RunBatch.
+
+        Args:
+            run_id: The ID of the run.
+            params: A list of parameters.
+            tags: A list of tags.
+            metrics: A list of metrics.
+            completion_event: A threading.Event object.
+        """
+        self.run_id = run_id
+        self.params = params or []
+        self.tags = tags or []
+        self.metrics = metrics or []
+        self.completion_event = completion_event
+        self.exception = None
diff --git a/mlflow/utils/async_logging/run_operations.py b/mlflow/utils/async_logging/run_operations.py
new file mode 100644
index 0000000000000..90b92c3525821
--- /dev/null
+++ b/mlflow/utils/async_logging/run_operations.py
@@ -0,0 +1,54 @@
+class RunOperations:
+    """
+    Represents a collection of operations on one or more MLflow Runs, such as run creation
+    or metric logging.
+    """
+
+    def __init__(self, operation_futures):
+        self._operation_futures = operation_futures or []
+
+    def wait(self):
+        """
+        Blocks on completion of the MLflow Run operations.
+ """ + from mlflow.exceptions import MlflowException + + failed_operations = [] + for future in self._operation_futures: + try: + future.result() + except Exception as e: + failed_operations.append(e) + + if len(failed_operations) > 0: + # Importing MlflowException gives circular reference / module load error, need to + # figure out why. + raise MlflowException( + "The following failures occurred while performing one or more logging" + + f" operations: {failed_operations}" + ) + + +def get_combined_run_operations(run_operations_list: [RunOperations]) -> RunOperations: + """ + Given a list of RunOperations, returns a single RunOperations object that represents the + combined set of operations. If the input list is empty, returns None. If the input list + contains only one element, returns that element. Otherwise, creates a new RunOperations + object that combines the operation futures from each input RunOperations object. + + :param run_operations_list: A list of RunOperations objects to combine. + :type run_operations_list: list[RunOperations] + :return: A single RunOperations object that represents the combined set of operations. + :rtype: RunOperations + """ + if not run_operations_list: + return None + if len(run_operations_list) == 1: + return run_operations_list[0] + + if len(run_operations_list) > 1: + operation_futures = [] + for run_operations in run_operations_list: + if run_operations and run_operations._operation_futures: + operation_futures.extend(run_operations._operation_futures) + return RunOperations(operation_futures) diff --git a/tests/integration/async_logging/test_async_logging_integration.py b/tests/integration/async_logging/test_async_logging_integration.py new file mode 100644 index 0000000000000..eba6674c2cc2e --- /dev/null +++ b/tests/integration/async_logging/test_async_logging_integration.py @@ -0,0 +1,229 @@ +import io +import pickle +import time +import uuid + +import mlflow +from mlflow import MlflowClient +from mlflow.entities.metric import Metric +from mlflow.entities.param import Param +from mlflow.entities.run_tag import RunTag + + +def test_async_logging_mlflow_client_pickle(): + experiment_name = f"mlflow-async-logging-pickle-test-{str(uuid.uuid4())[:8]}" + mlflow_client = MlflowClient() + + buffer = io.BytesIO() + pickle.dump(mlflow_client, buffer) + + deserialized_mlflow_client = pickle.loads(buffer.getvalue()) # Type: MlflowClient + experiment_id = deserialized_mlflow_client.create_experiment(experiment_name) + + run = deserialized_mlflow_client.create_run(experiment_id=experiment_id) + run_id = run.info.run_id + + run_operations = [] + + params_to_log = [] + param1 = Param("async param 1", "async param 1 value") + run_operations.append( + mlflow_client.log_param(run_id, param1.key, param1.value, synchronous=False) + ) + params_to_log.append(param1) + + for run_operation in run_operations: + run_operation.wait() + run = mlflow_client.get_run(run_id) + assert param1.key in run.data.params + assert param1.value == run.data.params[param1.key] + + +def test_async_logging_mlflow_client(): + experiment_name = f"mlflow-async-logging-test-{str(uuid.uuid4())[:8]}" + mlflow_client = MlflowClient() + experiment_id = mlflow_client.create_experiment(experiment_name) + + run = mlflow_client.create_run(experiment_id=experiment_id) + run_id = run.info.run_id + + run_operations = [] + + params_to_log = [] + param1 = Param("async param 1", "async param 1 value") + run_operations.append( + mlflow_client.log_param(run_id, param1.key, param1.value, synchronous=False) + ) 
+ params_to_log.append(param1) + + tags_to_log = [] + tag1 = RunTag("async tag 1", "async tag 1 value") + run_operations.append(mlflow_client.set_tag(run_id, tag1.key, tag1.value, synchronous=False)) + tags_to_log.append(tag1) + + metrics_to_log = [] + metric1 = Metric("async metric 1", 1, 132, 0) + run_operations.append( + mlflow_client.log_metric( + run_id, metric1.key, metric1.value, metric1.timestamp, metric1.step, synchronous=False + ) + ) + metrics_to_log.append(metric1) + + # Log batch of metrics + metric_value = 1 + for _ in range(1, 5): + metrics = [] + guid8 = str(uuid.uuid4())[:8] + params = [Param(f"batch param-{guid8}-{val}", value=str(val)) for val in range(1)] + tags = [RunTag(f"batch tag-{guid8}-{val}", value=str(val)) for val in range(1)] + for _ in range(0, 50): + metric_value += 1 + metrics.append( + Metric( + key=f"batch metrics async-{metric_value}", + value=time.time(), + timestamp=metric_value, + step=0, + ) + ) + + params_to_log.extend(params) + tags_to_log.extend(tags) + metrics_to_log.extend(metrics) + run_operation = mlflow_client.log_batch( + run_id, + params=params, + tags=tags, + metrics=metrics, + synchronous=False, + ) + run_operations.append(run_operation) + + for run_operation in run_operations: + run_operation.wait() + + run = mlflow_client.get_run(run_id) + for tag in tags_to_log: + assert tag.key in run.data.tags + assert tag.value == run.data.tags[tag.key] + for param in params_to_log: + assert param.key in run.data.params + assert param.value == run.data.params[param.key] + for metric in metrics_to_log: + assert metric.key in run.data.metrics + assert metric.value == run.data.metrics[metric.key] + + mlflow_client.set_terminated(run_id=run_id, status="FINISHED", end_time=time.time()) + + +def test_async_logging_fluent(): + experiment_name = f"mlflow-async-logging-test-{str(uuid.uuid4())[:8]}" + experiment_id = mlflow.create_experiment(experiment_name) + + run_operations = [] + + with mlflow.start_run(experiment_id=experiment_id) as run: + run_id = run.info.run_id + params_to_log = [] + param1 = Param("async param 1", "async param 1 value") + run_operations.append(mlflow.log_param(param1.key, param1.value, synchronous=False)) + params_to_log.append(param1) + + tags_to_log = [] + tag1 = RunTag("async tag 1", "async tag 1 value") + run_operations.append(mlflow.set_tag(tag1.key, tag1.value, synchronous=False)) + tags_to_log.append(tag1) + + metrics_to_log = [] + metric1 = Metric("async metric 1", 1, 432, 0) + run_operations.append(mlflow.log_metric(metric1.key, metric1.value, synchronous=False)) + metrics_to_log.append(metric1) + + # Log batch of metrics + metric_value = 1 + for _ in range(1, 5): + metrics = [] + guid8 = str(uuid.uuid4())[:8] + params = [Param(f"batch param-{guid8}-{val}", value=str(val)) for val in range(5)] + tags = [RunTag(f"batch tag-{guid8}-{val}", value=str(val)) for val in range(5)] + for _ in range(0, 50): + metric_value += 1 + metrics.append( + Metric( + key=f"batch metrics async-{metric_value}", + value=time.time(), + timestamp=metric_value, + step=0, + ) + ) + + params_to_log.extend(params) + run_operation = mlflow.log_params( + params={param.key: param.value for param in params}, + synchronous=False, + ) + run_operations.append(run_operation) + + tags_to_log.extend(tags) + run_operation = mlflow.set_tags( + tags={tag.key: tag.value for tag in tags}, + synchronous=False, + ) + run_operations.append(run_operation) + + metrics_to_log.extend(metrics) + run_operation = mlflow.log_metrics( + metrics={metric.key: metric.value for 
metric in metrics}, + step=1, + synchronous=False, + ) + run_operations.append(run_operation) + + for run_operation in run_operations: + run_operation.wait() + + run = mlflow.run + run = mlflow.get_run(run_id) + for tag in tags_to_log: + assert tag.key in run.data.tags + assert tag.value == run.data.tags[tag.key] + for param in params_to_log: + assert param.key in run.data.params + assert param.value == run.data.params[param.key] + for metric in metrics_to_log: + assert metric.key in run.data.metrics + assert metric.value == run.data.metrics[metric.key] + + +def test_async_logging_fluent_check_batch_split(): + # Check that batch is split into multiple requests if it exceeds the maximum size + # and if we wait for RunOperations returned then at the end everything should be logged. + experiment_name = f"mlflow-async-logging-test-{str(uuid.uuid4())[:8]}" + experiment_id = mlflow.create_experiment(experiment_name) + + run_operations = [] + + with mlflow.start_run(experiment_id=experiment_id) as run: + run_id = run.info.run_id + + metrics_to_log = { + f"batch metrics async-{metric_value}": metric_value for metric_value in range(0, 10000) + } + + run_operations = mlflow.log_metrics( + metrics=metrics_to_log, + step=1, + synchronous=False, + ) + + run_operations.wait() + + # Total 10000 metrics logged, max batch size =1000, so 10 requests will be sent. + assert len(run_operations._operation_futures) == 10 + + run = mlflow.run + run = mlflow.get_run(run_id) + for metric_key, metric_value in metrics_to_log.items(): + assert metric_key in run.data.metrics + assert metric_value == run.data.metrics[metric_key] diff --git a/tests/tracking/fluent/test_fluent.py b/tests/tracking/fluent/test_fluent.py index f7dc5e9371917..f0716de5814b8 100644 --- a/tests/tracking/fluent/test_fluent.py +++ b/tests/tracking/fluent/test_fluent.py @@ -1289,3 +1289,83 @@ def test_get_parent_run(): assert parent_run.data.params == {"a": "1"} assert mlflow.get_parent_run(run_id) is None + + +def test_log_metric_async(): + run_operations = [] + + with mlflow.start_run() as parent: + for num in range(100): + run_operations.append( + mlflow.log_metric("async single metric", step=num, value=num, synchronous=False) + ) + metrics = {f"async batch metric {num}": num for num in range(100)} + run_operations.append(mlflow.log_metrics(metrics=metrics, step=1, synchronous=False)) + + for run_operation in run_operations: + run_operation.wait() + parent_run = mlflow.get_run(parent.info.run_id) + assert parent_run.info.run_id == parent.info.run_id + assert parent_run.data.metrics["async single metric"] == 99 + for num in range(100): + assert parent_run.data.metrics[f"async batch metric {num}"] == num + + +def test_log_metric_async_throws(): + with mlflow.start_run(): + with pytest.raises(MlflowException, match="Please specify value as a valid double"): + mlflow.log_metric( + "async single metric", step=1, value="single metric value", synchronous=False + ).wait() + + with pytest.raises(MlflowException, match="Please specify value as a valid double"): + mlflow.log_metrics( + metrics={f"async batch metric {num}": "batch metric value" for num in range(2)}, + step=1, + synchronous=False, + ).wait() + + +def test_log_param_async(): + run_operations = [] + + with mlflow.start_run() as parent: + run_operations.append(mlflow.log_param("async single param", value="1", synchronous=False)) + params = {f"async batch param {num}": num for num in range(100)} + run_operations.append(mlflow.log_params(params=params, synchronous=False)) + + for run_operation in 
run_operations: + run_operation.wait() + parent_run = mlflow.get_run(parent.info.run_id) + assert parent_run.info.run_id == parent.info.run_id + assert parent_run.data.params["async single param"] == "1" + for num in range(100): + assert parent_run.data.params[f"async batch param {num}"] == str(num) + + +def test_log_param_async_throws(): + with mlflow.start_run(): + mlflow.log_param("async single param", value="1", synchronous=False) + with pytest.raises(MlflowException, match="Changing param values is not allowed"): + mlflow.log_param("async single param", value="2", synchronous=False).wait() + + mlflow.log_params({"async batch param": "2"}, synchronous=False) + with pytest.raises(MlflowException, match="Changing param values is not allowed"): + mlflow.log_params({"async batch param": "3"}, synchronous=False).wait() + + +def test_set_tag_async(): + run_operations = [] + + with mlflow.start_run() as parent: + run_operations.append(mlflow.set_tag("async single tag", value="1", synchronous=False)) + tags = {f"async batch tag {num}": num for num in range(100)} + run_operations.append(mlflow.set_tags(tags=tags, synchronous=False)) + + for run_operation in run_operations: + run_operation.wait() + parent_run = mlflow.get_run(parent.info.run_id) + assert parent_run.info.run_id == parent.info.run_id + assert parent_run.data.tags["async single tag"] == "1" + for num in range(100): + assert parent_run.data.tags[f"async batch tag {num}"] == str(num) diff --git a/tests/tracking/test_client.py b/tests/tracking/test_client.py index 540fe67390f13..d1cf1499d78a8 100644 --- a/tests/tracking/test_client.py +++ b/tests/tracking/test_client.py @@ -5,8 +5,10 @@ from mlflow import MlflowClient from mlflow.entities import ExperimentTag, Run, RunInfo, RunStatus, RunTag, SourceType, ViewType +from mlflow.entities.metric import Metric from mlflow.entities.model_registry import ModelVersion, ModelVersionTag from mlflow.entities.model_registry.model_version_status import ModelVersionStatus +from mlflow.entities.param import Param from mlflow.exceptions import MlflowException from mlflow.store.model_registry.sqlalchemy_store import ( SqlAlchemyStore as SqlAlchemyModelRegistryStore, @@ -771,3 +773,38 @@ def test_update_run(mock_store): end_time=mock.ANY, run_name="my name", ) + + +def test_client_log_metric_params_tags_overrides(mock_store): + experiment_id = mock.Mock() + start_time = mock.Mock() + run_name = mock.Mock() + run = MlflowClient().create_run(experiment_id, start_time, tags={}, run_name=run_name) + run_id = run.info.run_id + + run_operation = MlflowClient().log_metric(run_id, "m1", 0.87, 123456789, 1, synchronous=False) + run_operation.wait() + + run_operation = MlflowClient().log_param(run_id, "p1", "pv1", synchronous=False) + run_operation.wait() + + run_operation = MlflowClient().set_tag(run_id, "t1", "tv1", synchronous=False) + run_operation.wait() + + mock_store.log_metric_async.assert_called_once_with(run_id, Metric("m1", 0.87, 123456789, 1)) + mock_store.log_param_async.assert_called_once_with(run_id, Param("p1", "pv1")) + mock_store.set_tag_async.assert_called_once_with(run_id, RunTag("t1", "tv1")) + + mock_store.reset_mock() + + # log_batch_async + MlflowClient().create_run(experiment_id, start_time, {}) + metrics = [Metric("m1", 0.87, 123456789, 1), Metric("m2", 0.87, 123456789, 1)] + tags = [RunTag("t1", "tv1"), RunTag("t2", "tv2")] + params = [Param("p1", "pv1"), Param("p2", "pv2")] + run_operation = MlflowClient().log_batch(run_id, metrics, params, tags, synchronous=False) + 
run_operation.wait() + + mock_store.log_batch_async.assert_called_once_with( + run_id=run_id, metrics=metrics, params=params, tags=tags + ) diff --git a/tests/utils/test_async_logging_queue.py b/tests/utils/test_async_logging_queue.py new file mode 100644 index 0000000000000..553bb4b059a14 --- /dev/null +++ b/tests/utils/test_async_logging_queue.py @@ -0,0 +1,306 @@ +import io +import pickle +import random +import threading +import time +import uuid + +import pytest + +from mlflow import MlflowException +from mlflow.entities.metric import Metric +from mlflow.entities.param import Param +from mlflow.entities.run_tag import RunTag +from mlflow.utils.async_logging.async_logging_queue import AsyncLoggingQueue + +METRIC_PER_BATCH = 250 +TAGS_PER_BATCH = 1 +PARAMS_PER_BATCH = 1 +TOTAL_BATCHES = 5 + + +class RunData: + def __init__(self, throw_exception_on_batch_number=None) -> None: + if throw_exception_on_batch_number is None: + throw_exception_on_batch_number = [] + self.received_run_id = "" + self.received_metrics = [] + self.received_tags = [] + self.received_params = [] + self.batch_count = 0 + self.throw_exception_on_batch_number = ( + throw_exception_on_batch_number if throw_exception_on_batch_number else [] + ) + + def consume_queue_data(self, run_id, metrics, tags, params): + self.batch_count += 1 + if self.batch_count in self.throw_exception_on_batch_number: + raise MlflowException("Failed to log run data") + self.received_run_id = run_id + self.received_metrics.extend(metrics or []) + self.received_params.extend(params or []) + self.received_tags.extend(tags or []) + + +def test_single_thread_publish_consume_queue(): + run_id = "test_run_id" + run_data = RunData() + async_logging_queue = AsyncLoggingQueue(run_data.consume_queue_data) + async_logging_queue.activate() + metrics_sent = [] + tags_sent = [] + params_sent = [] + + run_operations = [] + for params, tags, metrics in _get_run_data(): + run_operations.append( + async_logging_queue.log_batch_async( + run_id=run_id, metrics=metrics, tags=tags, params=params + ) + ) + metrics_sent += metrics + tags_sent += tags + params_sent += params + + for run_operation in run_operations: + run_operation.wait() + + _assert_sent_received_data( + metrics_sent, + params_sent, + tags_sent, + run_data.received_metrics, + run_data.received_params, + run_data.received_tags, + ) + + +def test_queue_activation(): + run_id = "test_run_id" + run_data = RunData() + async_logging_queue = AsyncLoggingQueue(run_data.consume_queue_data) + + assert not async_logging_queue._is_activated + + metrics = [ + Metric( + key=f"batch metrics async-{val}", + value=val, + timestamp=val, + step=0, + ) + for val in range(METRIC_PER_BATCH) + ] + with pytest.raises(MlflowException, match="AsyncLoggingQueue is not activated."): + async_logging_queue.log_batch_async(run_id=run_id, metrics=metrics, tags=[], params=[]) + + async_logging_queue.activate() + assert async_logging_queue._is_activated + + +def test_partial_logging_failed(): + run_id = "test_run_id" + run_data = RunData(throw_exception_on_batch_number=[3, 4]) + + async_logging_queue = AsyncLoggingQueue(run_data.consume_queue_data) + async_logging_queue.activate() + + metrics_sent = [] + tags_sent = [] + params_sent = [] + + run_operations = [] + batch_id = 1 + for params, tags, metrics in _get_run_data(): + if batch_id in [3, 4]: + with pytest.raises(MlflowException, match="Failed to log run data"): + async_logging_queue.log_batch_async( + run_id=run_id, metrics=metrics, tags=tags, params=params + ).wait() + else: + 
run_operations.append( + async_logging_queue.log_batch_async( + run_id=run_id, metrics=metrics, tags=tags, params=params + ) + ) + metrics_sent += metrics + tags_sent += tags + params_sent += params + + batch_id += 1 + + for run_operation in run_operations: + run_operation.wait() + + _assert_sent_received_data( + metrics_sent, + params_sent, + tags_sent, + run_data.received_metrics, + run_data.received_params, + run_data.received_tags, + ) + + +def test_publish_multithread_consume_single_thread(): + run_id = "test_run_id" + run_data = RunData(throw_exception_on_batch_number=[]) + + async_logging_queue = AsyncLoggingQueue(run_data.consume_queue_data) + async_logging_queue.activate() + + run_operations = [] + t1 = threading.Thread( + target=_send_metrics_tags_params, args=(async_logging_queue, run_id, run_operations) + ) + t2 = threading.Thread( + target=_send_metrics_tags_params, args=(async_logging_queue, run_id, run_operations) + ) + + t1.start() + t2.start() + t1.join() + t2.join() + + for run_operation in run_operations: + run_operation.wait() + + assert len(run_data.received_metrics) == 2 * METRIC_PER_BATCH * TOTAL_BATCHES + assert len(run_data.received_tags) == 2 * TAGS_PER_BATCH * TOTAL_BATCHES + assert len(run_data.received_params) == 2 * PARAMS_PER_BATCH * TOTAL_BATCHES + + +class Consumer: + def __init__(self) -> None: + self.metrics = [] + self.tags = [] + self.params = [] + + def consume_queue_data(self, run_id, metrics, tags, params): + time.sleep(0.5) + self.metrics.extend(metrics or []) + self.params.extend(params or []) + self.tags.extend(tags or []) + + +def test_async_logging_queue_pickle(): + run_id = "test_run_id" + consumer = Consumer() + async_logging_queue = AsyncLoggingQueue(consumer.consume_queue_data) + + # Pickle the queue without activating it. + buffer = io.BytesIO() + pickle.dump(async_logging_queue, buffer) + deserialized_queue = pickle.loads(buffer.getvalue()) # Type: AsyncLoggingQueue + + # activate the queue and then try to pickle it + async_logging_queue.activate() + + run_operations = [] + for val in range(0, 10): + run_operations.append( + async_logging_queue.log_batch_async( + run_id=run_id, + metrics=[Metric("metric", val, timestamp=time.time(), step=1)], + tags=[], + params=[], + ) + ) + + assert not async_logging_queue._queue.empty() + + # Pickle the queue + buffer = io.BytesIO() + pickle.dump(async_logging_queue, buffer) + + deserialized_queue = pickle.loads(buffer.getvalue()) # Type: AsyncLoggingQueue + assert deserialized_queue._queue.empty() + assert deserialized_queue._lock is not None + assert deserialized_queue._is_activated is False + + for run_operation in run_operations: + run_operation.wait() + + assert len(consumer.metrics) == 10 + + # try to log using deserialized queue after activating it. 
+ deserialized_queue.activate() + assert deserialized_queue._is_activated + + run_operations = [] + + for val in range(0, 10): + run_operations.append( + deserialized_queue.log_batch_async( + run_id=run_id, + metrics=[Metric("metric", val, timestamp=time.time(), step=1)], + tags=[], + params=[], + ) + ) + + for run_operation in run_operations: + run_operation.wait() + + assert len(deserialized_queue._logging_func.__self__.metrics) == 10 + + +def _send_metrics_tags_params(run_data_queueing_processor, run_id, run_operations=None): + if run_operations is None: + run_operations = [] + metrics_sent = [] + tags_sent = [] + params_sent = [] + + for params, tags, metrics in _get_run_data(): + run_operations.append( + run_data_queueing_processor.log_batch_async( + run_id=run_id, metrics=metrics, tags=tags, params=params + ) + ) + + time.sleep(random.randint(1, 3)) + metrics_sent += metrics + tags_sent += tags + params_sent += params + + +def _get_run_data(total_batches=TOTAL_BATCHES): + for num in range(0, total_batches): + guid8 = str(uuid.uuid4())[:8] + params = [ + Param(f"batch param-{guid8}-{val}", value=str(time.time())) + for val in range(PARAMS_PER_BATCH) + ] + tags = [ + RunTag(f"batch tag-{guid8}-{val}", value=str(time.time())) + for val in range(TAGS_PER_BATCH) + ] + metrics = [ + Metric( + key=f"batch metrics async-{num}", + value=val, + timestamp=int(time.time() * 1000), + step=0, + ) + for val in range(METRIC_PER_BATCH) + ] + yield params, tags, metrics + + +def _assert_sent_received_data( + metrics_sent, params_sent, tags_sent, received_metrics, received_params, received_tags +): + for num in range(1, len(metrics_sent)): + assert metrics_sent[num].key == received_metrics[num].key + assert metrics_sent[num].value == received_metrics[num].value + assert metrics_sent[num].timestamp == received_metrics[num].timestamp + assert metrics_sent[num].step == received_metrics[num].step + + for num in range(1, len(tags_sent)): + assert tags_sent[num].key == received_tags[num].key + assert tags_sent[num].value == received_tags[num].value + + for num in range(1, len(params_sent)): + assert params_sent[num].key == received_params[num].key + assert params_sent[num].value == received_params[num].value From 779451b1394a68c2a39bc7d8cc60337a50ee4d41 Mon Sep 17 00:00:00 2001 From: Jerry Liang <66143562+jerrylian-db@users.noreply.github.com> Date: Wed, 25 Oct 2023 23:44:39 -0700 Subject: [PATCH 100/101] Build copy model version API for SQL and UC model registry stores (#10078) Signed-off-by: Jerry Liang Signed-off-by: mlflow-automation Co-authored-by: mlflow-automation --- .../_unity_catalog/registry/rest_store.py | 13 ++++++ ...c7_add_storage_location_field_to_model_.py | 28 ++++++++++++ mlflow/store/model_registry/abstract_store.py | 11 +++-- .../store/model_registry/dbmodels/models.py | 2 + mlflow/store/model_registry/file_store.py | 7 +-- .../store/model_registry/sqlalchemy_store.py | 34 +++++++++++++- tests/db/check_migration.py | 2 +- tests/db/schemas/mssql.sql | 1 + tests/db/schemas/mysql.sql | 1 + tests/db/schemas/postgresql.sql | 1 + tests/db/schemas/sqlite.sql | 1 + tests/resources/db/latest_schema.sql | 1 + tests/store/model_registry/test_file_store.py | 23 +++++----- .../model_registry/test_sqlalchemy_store.py | 45 +++++++++++++++++++ 14 files changed, 149 insertions(+), 21 deletions(-) create mode 100644 mlflow/store/db_migrations/versions/acf3f17fdcc7_add_storage_location_field_to_model_.py diff --git a/mlflow/store/_unity_catalog/registry/rest_store.py 
b/mlflow/store/_unity_catalog/registry/rest_store.py index aaa20074ba6c3..4bb4f16f8f12a 100644 --- a/mlflow/store/_unity_catalog/registry/rest_store.py +++ b/mlflow/store/_unity_catalog/registry/rest_store.py @@ -745,6 +745,19 @@ def get_model_version_by_alias(self, name, alias): response_proto = self._call_endpoint(GetModelVersionByAliasRequest, req_body) return model_version_from_uc_proto(response_proto.model_version) + def copy_model_version(self, src_mv, dst_name): + """ + Copy a model version from one registered model to another as a new model version. + + :param src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the source model version. + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. + :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the cloned model version. + """ + return self._copy_model_version_impl(src_mv, dst_name) + def _await_model_version_creation(self, mv, await_creation_for): """ Does not wait for the model version to become READY as a successful creation will diff --git a/mlflow/store/db_migrations/versions/acf3f17fdcc7_add_storage_location_field_to_model_.py b/mlflow/store/db_migrations/versions/acf3f17fdcc7_add_storage_location_field_to_model_.py new file mode 100644 index 0000000000000..03a40f3259b12 --- /dev/null +++ b/mlflow/store/db_migrations/versions/acf3f17fdcc7_add_storage_location_field_to_model_.py @@ -0,0 +1,28 @@ +"""add storage location field to model versions + +Revision ID: acf3f17fdcc7 +Revises: 2d6e25af4d3e +Create Date: 2023-10-23 15:26:53.062080 + +""" +from alembic import op +import sqlalchemy as sa +from mlflow.store.model_registry.dbmodels.models import SqlModelVersion + + +# revision identifiers, used by Alembic. +revision = "acf3f17fdcc7" +down_revision = "2d6e25af4d3e" +branch_labels = None +depends_on = None + + +def upgrade(): + op.add_column( + SqlModelVersion.__tablename__, + sa.Column("storage_location", sa.String(500), nullable=True, default=None), + ) + + +def downgrade(): + pass diff --git a/mlflow/store/model_registry/abstract_store.py b/mlflow/store/model_registry/abstract_store.py index 764ceead20a1e..e7fdcab21cb6c 100644 --- a/mlflow/store/model_registry/abstract_store.py +++ b/mlflow/store/model_registry/abstract_store.py @@ -5,7 +5,7 @@ from mlflow.entities.model_registry import ModelVersionTag from mlflow.entities.model_registry.model_version_status import ModelVersionStatus from mlflow.exceptions import MlflowException -from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS +from mlflow.protos.databricks_pb2 import RESOURCE_ALREADY_EXISTS, ErrorCode from mlflow.utils.annotations import developer_stable _logger = logging.getLogger(__name__) @@ -323,7 +323,6 @@ def get_model_version_by_alias(self, name, alias): """ pass - @abstractmethod def copy_model_version(self, src_mv, dst_name): """ Copy a model version from one registered model to another as a new model version. @@ -335,13 +334,17 @@ def copy_model_version(self, src_mv, dst_name): :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing the cloned model version. """ - pass + raise MlflowException( + "Method 'copy_model_version' has not yet been implemented for the current model " + "registry backend. To request support for implementing this method with this backend, " + "please submit an issue on GitHub." 
+ ) def _copy_model_version_impl(self, src_mv, dst_name): try: self.create_registered_model(dst_name) except MlflowException as e: - if e.error_code != RESOURCE_ALREADY_EXISTS: + if e.error_code != ErrorCode.Name(RESOURCE_ALREADY_EXISTS): raise return self.create_model_version( diff --git a/mlflow/store/model_registry/dbmodels/models.py b/mlflow/store/model_registry/dbmodels/models.py index fbe1f5a57bc65..30039eceec30f 100644 --- a/mlflow/store/model_registry/dbmodels/models.py +++ b/mlflow/store/model_registry/dbmodels/models.py @@ -80,6 +80,8 @@ class SqlModelVersion(Base): source = Column(String(500), nullable=True, default=None) + storage_location = Column(String(500), nullable=True, default=None) + run_id = Column(String(32), nullable=True, default=None) run_link = Column(String(500), nullable=True, default=None) diff --git a/mlflow/store/model_registry/file_store.py b/mlflow/store/model_registry/file_store.py index fa2b2c6a4df73..513a8a2463048 100644 --- a/mlflow/store/model_registry/file_store.py +++ b/mlflow/store/model_registry/file_store.py @@ -570,7 +570,8 @@ def create_model_version( Create a new model version from given source and run ID. :param name: Registered model name. - :param source: Source path where the MLflow model is stored. + :param source: Source path or model version URI (in the format + ``models://``) where the MLflow model is stored. :param run_id: Run ID from MLflow tracking server that generated the model. :param tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag` instances associated with this model version. @@ -593,10 +594,10 @@ def next_version(registered_model_name): _validate_model_version_tag(tag.key, tag.value) storage_location = source if urllib.parse.urlparse(source).scheme == "models": - (src_model_name, src_model_version, _, _) = _parse_model_uri(source) + parsed_model_uri = _parse_model_uri(source) try: storage_location = self.get_model_version_download_uri( - src_model_name, src_model_version + parsed_model_uri.name, parsed_model_uri.version ) except Exception as e: raise MlflowException( diff --git a/mlflow/store/model_registry/sqlalchemy_store.py b/mlflow/store/model_registry/sqlalchemy_store.py index a4a65e552f7a0..cabef5c65e674 100644 --- a/mlflow/store/model_registry/sqlalchemy_store.py +++ b/mlflow/store/model_registry/sqlalchemy_store.py @@ -1,9 +1,11 @@ import logging +import urllib import sqlalchemy from sqlalchemy.future import select import mlflow.store.db.utils +from mlflow.entities.model_registry import ModelVersion from mlflow.entities.model_registry.model_version_stages import ( ALL_STAGES, DEFAULT_STAGES_FOR_GET_LATEST_VERSIONS, @@ -18,6 +20,7 @@ RESOURCE_ALREADY_EXISTS, RESOURCE_DOES_NOT_EXIST, ) +from mlflow.store.artifact.utils.models import _parse_model_uri from mlflow.store.entities.paged_list import PagedList from mlflow.store.model_registry import ( SEARCH_MODEL_VERSION_MAX_RESULTS_DEFAULT, @@ -612,7 +615,8 @@ def create_model_version( Create a new model version from given source and run ID. :param name: Registered model name. - :param source: Source path where the MLflow model is stored. + :param source: Source path or model version URI (in the format + ``models://``) where the MLflow model is stored. :param run_id: Run ID from MLflow tracking server that generated the model. :param tags: A list of :py:class:`mlflow.entities.model_registry.ModelVersionTag` instances associated with this model version. 
@@ -631,6 +635,18 @@ def next_version(sql_registered_model): _validate_model_name(name) for tag in tags or []: _validate_model_version_tag(tag.key, tag.value) + storage_location = source + if urllib.parse.urlparse(source).scheme == "models": + parsed_model_uri = _parse_model_uri(source) + try: + storage_location = self.get_model_version_download_uri( + parsed_model_uri.name, parsed_model_uri.version + ) + except Exception as e: + raise MlflowException( + f"Unable to fetch model from model URI source artifact location '{source}'." + f"Error: {e}" + ) from e with self.ManagedSessionMaker() as session: creation_time = get_current_time_millis() for attempt in range(self.CREATE_MODEL_VERSION_RETRIES): @@ -644,6 +660,7 @@ def next_version(sql_registered_model): creation_time=creation_time, last_updated_time=creation_time, source=source, + storage_location=storage_location, run_id=run_id, run_link=run_link, description=description, @@ -856,7 +873,7 @@ def get_model_version_download_uri(self, name, version): """ with self.ManagedSessionMaker() as session: sql_model_version = self._get_sql_model_version(session, name, version) - return sql_model_version.source + return sql_model_version.storage_location or sql_model_version.source def search_model_versions( self, @@ -1099,6 +1116,19 @@ def get_model_version_by_alias(self, name, alias): f"Registered model alias {alias} not found.", INVALID_PARAMETER_VALUE ) + def copy_model_version(self, src_mv, dst_name) -> ModelVersion: + """ + Copy a model version from one registered model to another as a new model version. + + :param src_mv: A :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the source model version. + :param dst_name: the name of the registered model to copy the model version to. If a + registered model with this name does not exist, it will be created. + :return: Single :py:class:`mlflow.entities.model_registry.ModelVersion` object representing + the cloned model version. 
+ """ + return self._copy_model_version_impl(src_mv, dst_name) + def _await_model_version_creation(self, mv, await_creation_for): """ Does not wait for the model version to become READY as a successful creation will diff --git a/tests/db/check_migration.py b/tests/db/check_migration.py index 104f1cf6ee43a..3bff39184671a 100644 --- a/tests/db/check_migration.py +++ b/tests/db/check_migration.py @@ -109,7 +109,7 @@ def post_migration(): for table in TABLES: df_actual = pd.read_sql(sa.text(f"SELECT * FROM {table}"), conn) df_expected = pd.read_pickle(SNAPSHOTS_DIR / f"{table}.pkl") - pd.testing.assert_frame_equal(df_actual, df_expected) + pd.testing.assert_frame_equal(df_actual[df_expected.columns], df_expected) if __name__ == "__main__": diff --git a/tests/db/schemas/mssql.sql b/tests/db/schemas/mssql.sql index 9021ebb57aef4..8361adab9c319 100644 --- a/tests/db/schemas/mssql.sql +++ b/tests/db/schemas/mssql.sql @@ -79,6 +79,7 @@ CREATE TABLE model_versions ( status VARCHAR(20) COLLATE "SQL_Latin1_General_CP1_CI_AS", status_message VARCHAR(500) COLLATE "SQL_Latin1_General_CP1_CI_AS", run_link VARCHAR(500) COLLATE "SQL_Latin1_General_CP1_CI_AS", + storage_location VARCHAR(500) COLLATE "SQL_Latin1_General_CP1_CI_AS", CONSTRAINT model_version_pk PRIMARY KEY (name, version), CONSTRAINT "FK__model_vers__name__5812160E" FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ) diff --git a/tests/db/schemas/mysql.sql b/tests/db/schemas/mysql.sql index dccf071fe4220..757201f205dfd 100644 --- a/tests/db/schemas/mysql.sql +++ b/tests/db/schemas/mysql.sql @@ -80,6 +80,7 @@ CREATE TABLE model_versions ( status VARCHAR(20), status_message VARCHAR(500), run_link VARCHAR(500), + storage_location VARCHAR(500), PRIMARY KEY (name, version), CONSTRAINT model_versions_ibfk_1 FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ) diff --git a/tests/db/schemas/postgresql.sql b/tests/db/schemas/postgresql.sql index f1134e4d03d94..158fe88288698 100644 --- a/tests/db/schemas/postgresql.sql +++ b/tests/db/schemas/postgresql.sql @@ -81,6 +81,7 @@ CREATE TABLE model_versions ( status VARCHAR(20), status_message VARCHAR(500), run_link VARCHAR(500), + storage_location VARCHAR(500), CONSTRAINT model_version_pk PRIMARY KEY (name, version), CONSTRAINT model_versions_name_fkey FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ) diff --git a/tests/db/schemas/sqlite.sql b/tests/db/schemas/sqlite.sql index 2b385dc506822..09e310ea2e99c 100644 --- a/tests/db/schemas/sqlite.sql +++ b/tests/db/schemas/sqlite.sql @@ -82,6 +82,7 @@ CREATE TABLE model_versions ( status VARCHAR(20), status_message VARCHAR(500), run_link VARCHAR(500), + storage_location VARCHAR(500), CONSTRAINT model_version_pk PRIMARY KEY (name, version), FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ) diff --git a/tests/resources/db/latest_schema.sql b/tests/resources/db/latest_schema.sql index 23dd6f13b5383..2ec1661808255 100644 --- a/tests/resources/db/latest_schema.sql +++ b/tests/resources/db/latest_schema.sql @@ -82,6 +82,7 @@ CREATE TABLE model_versions ( status VARCHAR(20), status_message VARCHAR(500), run_link VARCHAR(500), + storage_location VARCHAR(500), CONSTRAINT model_version_pk PRIMARY KEY (name, version), FOREIGN KEY(name) REFERENCES registered_models (name) ON UPDATE CASCADE ) diff --git a/tests/store/model_registry/test_file_store.py b/tests/store/model_registry/test_file_store.py index d4c577fbfe5c5..ef12611c2ba9e 100644 --- 
a/tests/store/model_registry/test_file_store.py +++ b/tests/store/model_registry/test_file_store.py @@ -1514,32 +1514,33 @@ def predict(self, context, model_input, params=None): assert mv2[0].name == "model2" -def test_copy_model_version(store): +@pytest.mark.parametrize("copy_to_same_model", [False, True]) +def test_copy_model_version(store, copy_to_same_model): name1 = "test_for_copy_MV1" store.create_registered_model(name1) src_tags = [ ModelVersionTag("key", "value"), ModelVersionTag("anotherKey", "some other value"), ] - with mock.patch("time.time", return_value=456778): - src_mv = _create_model_version( - store, name1, tags=src_tags, run_link="dummylink", description="test description" - ) + src_mv = _create_model_version( + store, name1, tags=src_tags, run_link="dummylink", description="test description" + ) # Make some changes to the src MV that won't be copied over store.transition_model_version_stage( name1, src_mv.version, "Production", archive_existing_versions=False ) - name2 = "test_for_copy_MV2" + copy_rm_name = name1 if copy_to_same_model else "test_for_copy_MV2" + copy_mv_version = 2 if copy_to_same_model else 1 timestamp = time.time() - dst_mv = store.copy_model_version(src_mv, name2) - assert dst_mv.name == name2 - assert dst_mv.version == 1 + dst_mv = store.copy_model_version(src_mv, copy_rm_name) + assert dst_mv.name == copy_rm_name + assert dst_mv.version == copy_mv_version copied_mv = store.get_model_version(dst_mv.name, dst_mv.version) - assert copied_mv.name == name2 - assert copied_mv.version == 1 + assert copied_mv.name == copy_rm_name + assert copied_mv.version == copy_mv_version assert copied_mv.current_stage == "None" assert copied_mv.creation_timestamp >= timestamp assert copied_mv.last_updated_timestamp >= timestamp diff --git a/tests/store/model_registry/test_sqlalchemy_store.py b/tests/store/model_registry/test_sqlalchemy_store.py index 1e3c52da79bf4..40d8ea3a7be61 100644 --- a/tests/store/model_registry/test_sqlalchemy_store.py +++ b/tests/store/model_registry/test_sqlalchemy_store.py @@ -1639,3 +1639,48 @@ def test_delete_model_deletes_alias(store): match=r"Registered model alias test_alias not found.", ): store.get_model_version_by_alias(model_name, "test_alias") + + +@pytest.mark.parametrize("copy_to_same_model", [False, True]) +def test_copy_model_version(store, copy_to_same_model): + name1 = "test_for_copy_MV1" + store.create_registered_model(name1) + src_tags = [ + ModelVersionTag("key", "value"), + ModelVersionTag("anotherKey", "some other value"), + ] + src_mv = _mv_maker( + store, name1, tags=src_tags, run_link="dummylink", description="test description" + ) + + # Make some changes to the src MV that won't be copied over + store.transition_model_version_stage( + name1, src_mv.version, "Production", archive_existing_versions=False + ) + + copy_rm_name = name1 if copy_to_same_model else "test_for_copy_MV2" + copy_mv_version = 2 if copy_to_same_model else 1 + timestamp = time.time() + dst_mv = store.copy_model_version(src_mv, copy_rm_name) + assert dst_mv.name == copy_rm_name + assert dst_mv.version == copy_mv_version + + copied_mv = store.get_model_version(dst_mv.name, dst_mv.version) + assert copied_mv.name == copy_rm_name + assert copied_mv.version == copy_mv_version + assert copied_mv.current_stage == "None" + assert copied_mv.creation_timestamp >= timestamp + assert copied_mv.last_updated_timestamp >= timestamp + assert copied_mv.description == "test description" + assert copied_mv.source == f"models:/{src_mv.name}/{src_mv.version}" + assert 
store.get_model_version_download_uri(dst_mv.name, dst_mv.version) == src_mv.source + assert copied_mv.run_link == "dummylink" + assert copied_mv.run_id == src_mv.run_id + assert copied_mv.status == "READY" + assert copied_mv.status_message is None + assert copied_mv.tags == {"key": "value", "anotherKey": "some other value"} + + # Copy a model version copy + double_copy_mv = store.copy_model_version(copied_mv, "test_for_copy_MV3") + assert double_copy_mv.source == f"models:/{copied_mv.name}/{copied_mv.version}" + assert store.get_model_version_download_uri(dst_mv.name, dst_mv.version) == src_mv.source From c5ed81704a548cc65787ffab99bd1dfaaa274400 Mon Sep 17 00:00:00 2001 From: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> Date: Thu, 26 Oct 2023 13:12:50 +0530 Subject: [PATCH 101/101] Update get_minimum_required_python.py (#12) * Update main.py Signed-off-by: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> * Update get_minimum_required_python.py Signed-off-by: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> --------- Signed-off-by: Adarsh Shrivastav <142413097+AdarshKumarShorthillsAI@users.noreply.github.com> --- dev/get_minimum_required_python.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/dev/get_minimum_required_python.py b/dev/get_minimum_required_python.py index 0530e68594a8e..db022ef83d763 100644 --- a/dev/get_minimum_required_python.py +++ b/dev/get_minimum_required_python.py @@ -5,14 +5,14 @@ python dev/get_minimum_required_python.py -p scikit-learn -v 1.1.0 --python-versions "3.8" """ import argparse -import typing as t +from typing import Optional import requests from packaging.specifiers import SpecifierSet from packaging.version import Version -def get_requires_python(package: str, version: str) -> t.Optional[str]: +def get_requires_python(package: str, version: str) -> Optional[str]: resp = requests.get(f"https://pypi.python.org/pypi/{package}/json") resp.raise_for_status() return next(