
Commit

Merge pull request o3de#1849 from aws-lumberyard-dev/jonawals_SPEC-7604
Jonawals spec 7604
jonawals authored Jul 5, 2021
2 parents 45b83e4 + c5a4fd3 commit 81f9afd
Showing 12 changed files with 125 additions and 74 deletions.
@@ -43,7 +43,7 @@ namespace TestImpact

namespace AZStd
{
//! Hash function for ParentTarget types for use in maps and sets
//! Hash function for ParentTarget types for use in maps and sets.
template<> struct hash<TestImpact::ParentTarget>
{
size_t operator()(const TestImpact::ParentTarget& parentTarget) const noexcept
@@ -40,7 +40,7 @@ namespace TestImpact
//! Returns true if the specified target is in the list, otherwise false.
bool HasTarget(const AZStd::string& name) const;

// Returns the number of targets in the list.
//! Returns the number of targets in the list.
size_t GetNumTargets() const;

private:
@@ -28,17 +28,6 @@ namespace TestImpact
return m_coverageArtifact;
}

InstrumentedTestRunner::JobPayload ParseTestRunAndCoverageFiles(
const RepoPath& runFile,
const RepoPath& coverageFile,
AZStd::chrono::milliseconds duration)
{
TestRun run(GTest::TestRunSuitesFactory(ReadFileContents<TestEngineException>(runFile)), duration);
AZStd::vector<ModuleCoverage> moduleCoverages = Cobertura::ModuleCoveragesFactory(ReadFileContents<TestEngineException>(coverageFile));
TestCoverage coverage(AZStd::move(moduleCoverages));
return {AZStd::move(run), AZStd::move(coverage)};
}

InstrumentedTestRunner::InstrumentedTestRunner(size_t maxConcurrentRuns)
: JobRunner(maxConcurrentRuns)
{
@@ -58,16 +47,35 @@ namespace TestImpact
const auto& [meta, jobInfo] = jobData;
if (meta.m_result == JobResult::ExecutedWithSuccess || meta.m_result == JobResult::ExecutedWithFailure)
{
const auto printException = [](const Exception& e)
{
AZ_Printf("RunInstrumentedTests", AZStd::string::format("%s\n.", e.what()).c_str());
};

AZStd::optional<TestRun> run;
try
{
runs[jobId] = ParseTestRunAndCoverageFiles(
jobInfo->GetRunArtifactPath(),
jobInfo->GetCoverageArtifactPath(),
run = TestRun(
GTest::TestRunSuitesFactory(ReadFileContents<TestEngineException>(jobInfo->GetRunArtifactPath())),
meta.m_duration.value());
}
catch (const Exception& e)
{
AZ_Printf("RunInstrumentedTests", AZStd::string::format("%s\n", e.what()).c_str());
// No run result is not necessarily a failure (e.g. test targets not using gtest)
printException(e);
}

try
{
AZStd::vector<ModuleCoverage> moduleCoverages =
Cobertura::ModuleCoveragesFactory(ReadFileContents<TestEngineException>(jobInfo->GetCoverageArtifactPath()));
TestCoverage coverage(AZStd::move(moduleCoverages));
runs[jobId] = { run, AZStd::move(coverage) };
}
catch (const Exception& e)
{
printException(e);
// No coverage, however, is a failure
runs[jobId] = AZStd::nullopt;
}
}
@@ -30,9 +30,9 @@ namespace TestImpact

//! Runs a batch of test targets to determine the test coverage and passes/failures.
class InstrumentedTestRunner
: public TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<TestRun, TestCoverage>>
: public TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>
{
using JobRunner = TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<TestRun, TestCoverage>>;
using JobRunner = TestJobRunner<InstrumentedTestRunJobData, AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>;

public:
//! Constructs an instrumented test runner with the specified parameters common to all job runs of this runner.
@@ -13,17 +13,18 @@ namespace TestImpact
{
namespace
{
AZStd::optional<TestRun> ReleaseTestRun(AZStd::optional<AZStd::pair<TestRun, TestCoverage>>& testRunAndCoverage)
AZStd::optional<TestRun> ReleaseTestRun(AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>& testRunAndCoverage)
{
if (testRunAndCoverage.has_value())
if (testRunAndCoverage.has_value() && testRunAndCoverage->first.has_value())
{
return AZStd::move(testRunAndCoverage.value().first);
}

return AZStd::nullopt;
}

AZStd::optional<TestCoverage> ReleaseTestCoverage(AZStd::optional<AZStd::pair<TestRun, TestCoverage>>& testRunAndCoverage)
AZStd::optional<TestCoverage> ReleaseTestCoverage(
AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>& testRunAndCoverage)
{
if (testRunAndCoverage.has_value())
{
@@ -34,7 +35,8 @@ namespace TestImpact
}
}

TestEngineInstrumentedRun::TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<TestRun, TestCoverage>>&& testRunAndCoverage)
TestEngineInstrumentedRun::TestEngineInstrumentedRun(
TestEngineJob&& testJob, AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>&& testRunAndCoverage)
: TestEngineRegularRun(AZStd::move(testJob), ReleaseTestRun(testRunAndCoverage))
, m_testCoverage(ReleaseTestCoverage(testRunAndCoverage))
{
@@ -17,7 +17,7 @@ namespace TestImpact
: public TestEngineRegularRun
{
public:
TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<TestRun, TestCoverage>>&& testRunAndCoverage);
TestEngineInstrumentedRun(TestEngineJob&& testJob, AZStd::optional<AZStd::pair<AZStd::optional<TestRun>, TestCoverage>>&& testRunAndCoverage);

//! Returns the test coverage payload for this job (if any).
const AZStd::optional<TestCoverage>& GetTestCoverge() const;
@@ -22,6 +22,8 @@ namespace TestImpact
{
namespace
{
static const char* const LogCallSite = "TestImpact";

//! Simple helper class for tracking basic timing information.
class Timer
{
@@ -149,7 +151,8 @@ namespace TestImpact
}
catch ([[maybe_unused]]const Exception& e)
{
AZ_Printf("TestImpactRuntime",
AZ_Printf(
LogCallSite,
AZStd::string::format(
"No test impact analysis data found for suite '%s' at %s\n", GetSuiteTypeName(m_suiteFilter).c_str(), m_sparTIAFile.c_str()).c_str());
}
@@ -283,8 +286,8 @@ namespace TestImpact
job.GetTestCoverge().has_value(),
RuntimeException,
AZStd::string::format(
"Test target '%s' completed its test run successfully but produced no coverage data",
job.GetTestTarget()->GetName().c_str()));
"Test target '%s' completed its test run successfully but produced no coverage data. Command string: '%s'",
job.GetTestTarget()->GetName().c_str(), job.GetCommandString().c_str()));
}

if (!job.GetTestCoverge().has_value())
@@ -313,7 +316,7 @@ namespace TestImpact
}
else
{
AZ_Warning("TestImpact", false, "Ignoring source, source it outside of repo: '%s'", sourcePath.c_str());
AZ_Warning(LogCallSite, false, "Ignoring source, source it outside of repo: '%s'", sourcePath.c_str());
}
}

@@ -322,17 +325,31 @@

void Runtime::UpdateAndSerializeDynamicDependencyMap(const AZStd::vector<TestEngineInstrumentedRun>& jobs)
{
const auto sourceCoverageTestsList = CreateSourceCoveringTestFromTestCoverages(jobs);
if (!sourceCoverageTestsList.GetNumSources())
try
{
return;
}
const auto sourceCoverageTestsList = CreateSourceCoveringTestFromTestCoverages(jobs);
if (sourceCoverageTestsList.GetNumSources() == 0)
{
return;
}

m_dynamicDependencyMap->ReplaceSourceCoverage(sourceCoverageTestsList);
const auto sparTIA = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTIAData = SerializeSourceCoveringTestsList(sparTIA);
WriteFileContents<RuntimeException>(sparTIAData, m_sparTIAFile);
m_hasImpactAnalysisData = true;
m_dynamicDependencyMap->ReplaceSourceCoverage(sourceCoverageTestsList);
const auto sparTIA = m_dynamicDependencyMap->ExportSourceCoverage();
const auto sparTIAData = SerializeSourceCoveringTestsList(sparTIA);
WriteFileContents<RuntimeException>(sparTIAData, m_sparTIAFile);
m_hasImpactAnalysisData = true;
}
catch(const RuntimeException& e)
{
if (m_integrationFailurePolicy == Policy::IntegrityFailure::Abort)
{
throw e;
}
else
{
AZ_Error(LogCallSite, false, e.what());
}
}
}

TestSequenceResult Runtime::RegularTestSequence(
4 changes: 0 additions & 4 deletions cmake/TestImpactFramework/ConsoleFrontendConfig.in
@@ -4,10 +4,6 @@
"timestamp": "${timestamp}"
},
"jenkins": {
"pipeline_of_truth" : [
"nightly-incremental",
"nightly-clean"
],
"use_test_impact_analysis": ${use_tiaf}
},
"repo": {
15 changes: 12 additions & 3 deletions scripts/build/Jenkins/Jenkinsfile
@@ -13,8 +13,17 @@ EMPTY_JSON = readJSON text: '{}'

ENGINE_REPOSITORY_NAME = 'o3de'

BUILD_SNAPSHOTS = ['development', 'stabilization/2106', '']
DEFAULT_BUILD_SNAPSHOT = BUILD_SNAPSHOTS.get(0)
// Branches with build snapshots
BUILD_SNAPSHOTS = ['development', 'stabilization/2106']

// Build snapshots plus an empty snapshot (for use with the 'SNAPSHOT' pipeline parameter)
BUILD_SNAPSHOTS_WITH_EMPTY = BUILD_SNAPSHOTS + ''

// The default build snapshot to be selected in the 'SNAPSHOT' pipeline parameter
DEFAULT_BUILD_SNAPSHOT = BUILD_SNAPSHOTS_WITH_EMPTY.get(0)

// Branches with build snapshots as comma separated value string
env.BUILD_SNAPSHOTS = BUILD_SNAPSHOTS.join(",")

def pipelineProperties = []

@@ -476,7 +485,7 @@ try {
}
} else {
// Non-PR builds
pipelineParameters.add(choice(defaultValue: DEFAULT_BUILD_SNAPSHOT, name: 'SNAPSHOT', choices: BUILD_SNAPSHOTS, description: 'Selects the build snapshot to use. A more diverted snapshot will cause longer build times, but will not cause build failures.'))
pipelineParameters.add(choice(defaultValue: DEFAULT_BUILD_SNAPSHOT, name: 'SNAPSHOT', choices: BUILD_SNAPSHOTS_WITH_EMPTY, description: 'Selects the build snapshot to use. A more diverted snapshot will cause longer build times, but will not cause build failures.'))
snapshot = env.SNAPSHOT
echo "Snapshot \"${snapshot}\" selected."
}
7 changes: 3 additions & 4 deletions scripts/build/Platform/Windows/build_config.json
@@ -27,9 +27,7 @@
},
"profile_vs2019_pipe": {
"TAGS": [
"default",
"nightly-incremental",
"nightly-clean"
"default"
],
"steps": [
"profile_vs2019",
@@ -90,7 +88,8 @@
"OUTPUT_DIRECTORY": "build/windows_vs2019",
"CONFIGURATION": "profile",
"SCRIPT_PATH": "scripts/build/TestImpactAnalysis/tiaf_driver.py",
"SCRIPT_PARAMETERS": "--testFailurePolicy=continue --suite main --pipeline !PIPELINE_NAME! --destCommit !CHANGE_ID! --config \"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/persistent/tiaf.profile.json\""
"SCRIPT_PARAMETERS":
"--config=\"!OUTPUT_DIRECTORY!/bin/TestImpactFramework/persistent/tiaf.profile.json\" --suite=main --testFailurePolicy=continue --destBranch=!CHANGE_TARGET! --pipeline=!PIPELINE_NAME! --destCommit=!CHANGE_ID! --branchesOfTruth=!BUILD_SNAPSHOTS! --pipelinesOfTruth=default"
}
},
"debug_vs2019": {
66 changes: 42 additions & 24 deletions scripts/build/TestImpactAnalysis/tiaf.py
@@ -20,33 +20,51 @@ def is_child_path(parent_path, child_path):
return os.path.commonpath([os.path.abspath(parent_path)]) == os.path.commonpath([os.path.abspath(parent_path), os.path.abspath(child_path)])

class TestImpact:
def __init__(self, config_file, pipeline, dst_commit):
self.__pipeline = pipeline
def __init__(self, config_file, dst_commit, dst_branch, pipeline, branches_of_truth, pipelines_of_truth):
# Commit
self.__dst_commit = dst_commit
print(f"Commit: '{self.__dst_commit}'.")
self.__src_commit = None
self.__has_src_commit = False
# Branch
self.__dst_branch = dst_branch
print(f"Destination branch: '{self.__dst_branch}'.")
self.__branches_of_truth = branches_of_truth
print(f"Branches of truth: '{self.__branches_of_truth}'.")
if self.__dst_branch in self.__branches_of_truth:
self.__is_branch_of_truth = True
else:
self.__is_branch_of_truth = False
print(f"Is branch of truth: '{self.__is_branch_of_truth}'.")
# Pipeline
self.__pipeline = pipeline
print(f"Pipeline: '{self.__pipeline}'.")
self.__pipelines_of_truth = pipelines_of_truth
print(f"Pipelines of truth: '{self.__pipelines_of_truth}'.")
if self.__pipeline in self.__pipelines_of_truth:
self.__is_pipeline_of_truth = True
else:
self.__is_pipeline_of_truth = False
print(f"Is pipeline of truth: '{self.__is_pipeline_of_truth}'.")
# Config
self.__parse_config_file(config_file)
if self.__use_test_impact_analysis and not self.__is_pipeline_of_truth:
self.__generate_change_list()
# Sequence
if self.__use_test_impact_analysis:
if self.__is_pipeline_of_truth and self.__is_branch_of_truth:
self.__is_seeding = True
else:
self.__is_seeding = False
self.__generate_change_list()

# Parse the configuration file and retrieve the data needed for launching the test impact analysis runtime
def __parse_config_file(self, config_file):
print(f"Attempting to parse configuration file '{config_file}'...")
with open(config_file, "r") as config_data:
config = json.load(config_data)
# Repository
self.__repo_dir = config["repo"]["root"]
# Jenkins
self.__repo = Repo(self.__repo_dir)
# TIAF
self.__use_test_impact_analysis = config["jenkins"]["use_test_impact_analysis"]
self.__pipeline_of_truth = config["jenkins"]["pipeline_of_truth"]
print(f"Pipeline of truth: '{self.__pipeline_of_truth}'.")
print(f"This pipeline: '{self.__pipeline}'.")
if self.__pipeline in self.__pipeline_of_truth:
self.__is_pipeline_of_truth = True
else:
self.__is_pipeline_of_truth = False
print(f"Is pipeline of truth: '{self.__is_pipeline_of_truth}'.")
# TIAF binary
self.__tiaf_bin = config["repo"]["tiaf_bin"]
if self.__use_test_impact_analysis and not os.path.isfile(self.__tiaf_bin):
raise FileNotFoundError("Could not find tiaf binary")
@@ -143,7 +161,7 @@ def __generate_change_list(self):
# Runs the specified test sequence
def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeout):
args = []
pipeline_of_truth_test_failure_policy = "continue"
seed_sequence_test_failure_policy = "continue"
# Suite
args.append(f"--suite={suite}")
print(f"Test suite is set to '{suite}'.")
@@ -156,15 +174,15 @@ def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeou
print(f"Global sequence timeout is set to {test_timeout} seconds.")
if self.__use_test_impact_analysis:
print("Test impact analysis is enabled.")
# Pipeline of truth sequence
if self.__is_pipeline_of_truth:
# Seed sequences
if self.__is_seeding:
# Sequence type
args.append("--sequence=seed")
print("Sequence type is set to 'seed'.")
# Test failure policy
args.append(f"--fpolicy={pipeline_of_truth_test_failure_policy}")
print(f"Test failure policy is set to '{pipeline_of_truth_test_failure_policy}'.")
# Non pipeline of truth sequence
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Impact analysis sequences
else:
if self.__has_change_list:
# Change list
@@ -194,8 +212,8 @@ def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeou
# Pipeline of truth sequence
if self.__is_pipeline_of_truth:
# Test failure policy
args.append(f"--fpolicy={pipeline_of_truth_test_failure_policy}")
print(f"Test failure policy is set to '{pipeline_of_truth_test_failure_policy}'.")
args.append(f"--fpolicy={seed_sequence_test_failure_policy}")
print(f"Test failure policy is set to '{seed_sequence_test_failure_policy}'.")
# Non pipeline of truth sequence
else:
# Test failure policy
@@ -205,7 +223,7 @@ def run(self, suite, test_failure_policy, safe_mode, test_timeout, global_timeou
print("Args: ", end='')
print(*args)
result = subprocess.run([self.__tiaf_bin] + args)
# If the sequence completed 9with or without failures) we will update the historical meta-data
# If the sequence completed (with or without failures) we will update the historical meta-data
if result.returncode == 0 or result.returncode == 7:
print("Test impact analysis runtime returned successfully.")
if self.__is_pipeline_of_truth:
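The net effect of the tiaf.py changes above is that coverage seeding now requires both a branch of truth and a pipeline of truth, where previously only the pipeline was checked against a value baked into the config file. A minimal sketch of that decision, distilled from the diff rather than copied from it (the function name and the feature-branch value are illustrative only; the branch and pipeline lists match the --branchesOfTruth=!BUILD_SNAPSHOTS! and --pipelinesOfTruth=default wiring introduced in the Jenkinsfile and build_config.json changes):

def is_seeding(dst_branch, pipeline, branches_of_truth, pipelines_of_truth):
    # A run seeds the coverage data only when it is on a branch of truth
    # AND running in a pipeline of truth; every other TIAF-enabled run
    # still generates a change list for an impact analysis sequence.
    return dst_branch in branches_of_truth and pipeline in pipelines_of_truth

# Example values matching this commit's Jenkinsfile/build_config.json wiring:
branches = ["development", "stabilization/2106"]
pipelines = ["default"]
assert is_seeding("development", "default", branches, pipelines)
assert not is_seeding("some-feature-branch", "default", branches, pipelines)

Runs that do not seed fall through to the change-list path shown in the run() diff above.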
