Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: implement WDL 1.2 task evaluation. #265

Merged
merged 11 commits into from
Dec 11, 2024
Next Next commit
refactor: refactor analysis results.
This commit refactors analysis results so that an analysis document contains
the identifier, URI, and the diagnostics resulting from the analysis of the
document.

As a consequence, the `ParseResult` enumeration was removed and its
contents merged into `AnalysisResult`.
peterhuene committed Dec 10, 2024
commit 97a277624da8636aaba782ff5c22936a6ebda8c1
8 changes: 4 additions & 4 deletions gauntlet/src/lib.rs
Original file line number Diff line number Diff line change
@@ -198,7 +198,7 @@ pub async fn gauntlet(args: Args) -> Result<()> {
total_time += elapsed;

for result in &results {
let path = result.uri().to_file_path().ok();
let path = result.document().uri().to_file_path().ok();
let path = match &path {
Some(path) => path
.strip_prefix(&repo_root)
@@ -211,17 +211,17 @@ pub async fn gauntlet(args: Args) -> Result<()> {
let document_identifier =
document::Identifier::new(repository_identifier.clone(), &path);

let diagnostics: Cow<'_, [Diagnostic]> = match result.parse_result().error() {
let diagnostics: Cow<'_, [Diagnostic]> = match result.error() {
Some(e) => {
vec![Diagnostic::error(format!("failed to read `{path}`: {e:#}"))].into()
}
None => result.diagnostics().into(),
None => result.document().diagnostics().into(),
};

let mut actual = IndexSet::new();
if !diagnostics.is_empty() {
let source = result
.parse_result()
.document()
.root()
.map(|n| SyntaxNode::new_root(n.clone()).text().to_string())
.unwrap_or(String::new());
242 changes: 82 additions & 160 deletions wdl-analysis/src/analyzer.rs
Original file line number Diff line number Diff line change
@@ -12,6 +12,7 @@ use std::sync::Arc;
use std::thread::JoinHandle;

use anyhow::Context;
use anyhow::Error;
use anyhow::Result;
use anyhow::anyhow;
use anyhow::bail;
@@ -21,13 +22,11 @@ use line_index::LineIndex;
use line_index::WideEncoding;
use line_index::WideLineCol;
use path_clean::clean;
use rowan::GreenNode;
use tokio::runtime::Handle;
use tokio::sync::mpsc;
use tokio::sync::oneshot;
use url::Url;
use walkdir::WalkDir;
use wdl_ast::Diagnostic;
use wdl_ast::Severity;
use wdl_ast::SyntaxNode;
use wdl_ast::SyntaxNodeExt;
@@ -75,157 +74,68 @@ pub fn path_to_uri(path: impl AsRef<Path>) -> Option<Url> {
Url::from_file_path(clean(absolute(path).ok()?)).ok()
}

/// Represents the result of a parse.
#[derive(Debug, Clone)]
pub enum ParseResult {
/// There was an error parsing the document.
Error(Arc<anyhow::Error>),
/// The document was parsed.
Parsed {
/// The monotonic version of the document that was parsed.
///
/// This value comes from incremental changes to the file.
///
/// If `None`, the parsed version had no incremental changes.
version: Option<i32>,
/// The root node of the document.
root: GreenNode,
/// The line index used to map line/column offsets to byte offsets and
/// vice versa.
lines: Arc<LineIndex>,
},
}

impl ParseResult {
/// Gets the version of the parsed document.
///
/// Returns `None` if there was an error parsing the document or the parsed
/// document had no incremental changes.
pub fn version(&self) -> Option<i32> {
match self {
Self::Error(_) => None,
Self::Parsed { version, .. } => *version,
}
}

/// Gets the root from the parse result.
///
/// Returns `None` if there was an error parsing the document.
pub fn root(&self) -> Option<&GreenNode> {
match self {
Self::Error(_) => None,
Self::Parsed { root, .. } => Some(root),
}
}

/// Gets the line index from the parse result.
///
/// Returns `None` if there was an error parsing the document.
pub fn lines(&self) -> Option<&Arc<LineIndex>> {
match self {
Self::Error(_) => None,
Self::Parsed { lines, .. } => Some(lines),
}
}

/// Gets the AST document of the parse result.
///
/// Returns `None` if there was an error parsing the document.
pub fn document(&self) -> Option<wdl_ast::Document> {
match &self {
ParseResult::Error(_) => None,
ParseResult::Parsed { root, .. } => Some(
wdl_ast::Document::cast(SyntaxNode::new_root(root.clone()))
.expect("node should cast"),
),
}
}

/// Gets the error parsing the document.
///
/// Returns `None` if the document was parsed.
pub fn error(&self) -> Option<&Arc<anyhow::Error>> {
match self {
Self::Error(e) => Some(e),
ParseResult::Parsed { .. } => None,
}
}
}

impl From<&ParseState> for ParseResult {
fn from(state: &ParseState) -> Self {
match state {
ParseState::NotParsed => {
panic!("cannot create a result for a file that hasn't been parsed")
}
ParseState::Error(e) => Self::Error(e.clone()),
ParseState::Parsed {
version,
root,
lines,
diagnostics: _,
} => Self::Parsed {
version: *version,
root: root.clone(),
lines: lines.clone(),
},
}
}
}

/// Represents the result of an analysis.
///
/// Analysis results are cheap to clone.
#[derive(Debug, Clone)]
pub struct AnalysisResult {
/// The analysis result id.
/// The error that occurred when attempting to parse the file (e.g. the file
/// could not be opened).
error: Option<Arc<Error>>,
/// The monotonic version of the document that was parsed.
///
/// This value comes from incremental changes to the file.
///
/// The identifier changes every time the document is analyzed.
id: Arc<String>,
/// The URI of the analyzed document.
uri: Arc<Url>,
/// The result from parsing the file.
parse_result: ParseResult,
/// The diagnostics for the document.
diagnostics: Arc<[Diagnostic]>,
/// If `None`, the parsed version had no incremental changes.
version: Option<i32>,
/// The lines indexed for the parsed file.
lines: Option<Arc<LineIndex>>,
/// The analyzed document.
document: Arc<Document>,
}

impl AnalysisResult {
/// Constructs a new analysis result for the given graph node.
pub(crate) fn new(node: &DocumentGraphNode) -> Self {
let analysis = node.analysis().expect("analysis not completed");
let (error, version, lines) = match node.parse_state() {
ParseState::NotParsed => unreachable!("document should have been parsed"),
ParseState::Error(e) => (Some(e), None, None),
ParseState::Parsed { version, lines, .. } => (None, *version, Some(lines)),
};

Self {
id: analysis.id().clone(),
uri: node.uri().clone(),
parse_result: node.parse_state().into(),
diagnostics: analysis.diagnostics().clone(),
document: analysis.document().clone(),
error: error.cloned(),
version,
lines: lines.cloned(),
document: node
.analysis()
.expect("analysis should have completed")
.clone(),
}
}

/// Gets the identifier of the analysis result.
/// Gets the error that occurred when attempting to parse the document.
///
/// This value changes when a document is reanalyzed.
pub fn id(&self) -> &Arc<String> {
&self.id
}

/// Gets the URI of the document that was analyzed.
pub fn uri(&self) -> &Arc<Url> {
&self.uri
/// An example error would be if the file could not be opened.
///
/// Returns `None` if the document was parsed successfully.
pub fn error(&self) -> Option<&Arc<Error>> {
self.error.as_ref()
}

/// Gets the result of the parse.
pub fn parse_result(&self) -> &ParseResult {
&self.parse_result
/// Gets the incremental version of the parsed document.
///
/// Returns `None` if there was an error parsing the document or if the
/// parsed document had no incremental changes.
pub fn version(&self) -> Option<i32> {
self.version
}

/// Gets the diagnostics associated with the document.
pub fn diagnostics(&self) -> &[Diagnostic] {
&self.diagnostics
/// Gets the line index of the parsed document.
///
/// Returns `None` if there was an error parsing the document.
pub fn lines(&self) -> Option<&Arc<LineIndex>> {
self.lines.as_ref()
}

/// Gets the analyzed document.
@@ -828,24 +738,30 @@ workflow test {

let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].diagnostics().len(), 1);
assert_eq!(results[0].diagnostics()[0].rule(), None);
assert_eq!(results[0].diagnostics()[0].severity(), Severity::Error);
assert_eq!(results[0].document.diagnostics().len(), 1);
assert_eq!(results[0].document.diagnostics()[0].rule(), None);
assert_eq!(
results[0].document.diagnostics()[0].severity(),
Severity::Error
);
assert_eq!(
results[0].diagnostics()[0].message(),
results[0].document.diagnostics()[0].message(),
"conflicting workflow name `test`"
);

// Analyze again and ensure the analysis result id is unchanged
let id = results[0].id().clone();
let id = results[0].document.id().clone();
let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].id().as_ref(), id.as_ref());
assert_eq!(results[0].diagnostics().len(), 1);
assert_eq!(results[0].diagnostics()[0].rule(), None);
assert_eq!(results[0].diagnostics()[0].severity(), Severity::Error);
assert_eq!(results[0].document.id().as_ref(), id.as_ref());
assert_eq!(results[0].document.diagnostics().len(), 1);
assert_eq!(results[0].document.diagnostics()[0].rule(), None);
assert_eq!(
results[0].document.diagnostics()[0].severity(),
Severity::Error
);
assert_eq!(
results[0].diagnostics()[0].message(),
results[0].document.diagnostics()[0].message(),
"conflicting workflow name `test`"
);
}
@@ -877,11 +793,14 @@ workflow test {

let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].diagnostics().len(), 1);
assert_eq!(results[0].diagnostics()[0].rule(), None);
assert_eq!(results[0].diagnostics()[0].severity(), Severity::Error);
assert_eq!(results[0].document.diagnostics().len(), 1);
assert_eq!(results[0].document.diagnostics()[0].rule(), None);
assert_eq!(
results[0].diagnostics()[0].message(),
results[0].document.diagnostics()[0].severity(),
Severity::Error
);
assert_eq!(
results[0].document.diagnostics()[0].message(),
"conflicting workflow name `test`"
);

@@ -905,18 +824,18 @@ workflow something_else {

// Analyze again and ensure the analysis result id is changed and the issue
// fixed
let id = results[0].id().clone();
let id = results[0].document.id().clone();
let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 1);
assert!(results[0].id().as_ref() != id.as_ref());
assert_eq!(results[0].diagnostics().len(), 0);
assert!(results[0].document.id().as_ref() != id.as_ref());
assert_eq!(results[0].document.diagnostics().len(), 0);

// Analyze again and ensure the analysis result id is unchanged
let id = results[0].id().clone();
let id = results[0].document.id().clone();
let results = analyzer.analyze_document((), uri).await.unwrap();
assert_eq!(results.len(), 1);
assert!(results[0].id().as_ref() == id.as_ref());
assert_eq!(results[0].diagnostics().len(), 0);
assert!(results[0].document.id().as_ref() == id.as_ref());
assert_eq!(results[0].document.diagnostics().len(), 0);
}

#[tokio::test]
@@ -946,11 +865,14 @@ workflow test {

let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 1);
assert_eq!(results[0].diagnostics().len(), 1);
assert_eq!(results[0].diagnostics()[0].rule(), None);
assert_eq!(results[0].diagnostics()[0].severity(), Severity::Error);
assert_eq!(results[0].document.diagnostics().len(), 1);
assert_eq!(results[0].document.diagnostics()[0].rule(), None);
assert_eq!(
results[0].document.diagnostics()[0].severity(),
Severity::Error
);
assert_eq!(
results[0].diagnostics()[0].message(),
results[0].document.diagnostics()[0].message(),
"conflicting workflow name `test`"
);

@@ -970,11 +892,11 @@ workflow test {

// Analyze again and ensure the analysis result id is changed and the issue was
// fixed
let id = results[0].id().clone();
let id = results[0].document.id().clone();
let results = analyzer.analyze_document((), uri).await.unwrap();
assert_eq!(results.len(), 1);
assert!(results[0].id().as_ref() != id.as_ref());
assert_eq!(results[0].diagnostics().len(), 0);
assert!(results[0].document.id().as_ref() != id.as_ref());
assert_eq!(results[0].document.diagnostics().len(), 0);
}

#[tokio::test]
@@ -1020,9 +942,9 @@ workflow test {
// Analyze the documents
let results = analyzer.analyze(()).await.unwrap();
assert_eq!(results.len(), 3);
assert!(results[0].diagnostics().is_empty());
assert!(results[1].diagnostics().is_empty());
assert!(results[2].diagnostics().is_empty());
assert!(results[0].document.diagnostics().is_empty());
assert!(results[1].document.diagnostics().is_empty());
assert!(results[2].document.diagnostics().is_empty());

// Analyze the documents again
let results = analyzer.analyze(()).await.unwrap();
Loading