From 2c4888aceb8c0a3aac4715ca990faee44b88407a Mon Sep 17 00:00:00 2001 From: Suraj Chafle Date: Tue, 20 Sep 2022 12:23:59 -0700 Subject: [PATCH] Initial commit --- .gitignore | 72 + CHANGELOG.md | 90 ++ CONTRIBUTING.md | 85 ++ LICENSE | 19 + README.md | 215 +++ bug_report.md | 27 + cmd/compare/compare_cmd.go | 89 ++ cmd/linter/inline_lint_config.go | 92 ++ cmd/linter/lexer/README.md | 6 + cmd/linter/lexer/blockstring.go | 58 + cmd/linter/lexer/lexer.go | 515 +++++++ cmd/linter/lexer/token.go | 149 ++ cmd/linter/lint_cmd.go | 110 ++ cmd/linter/linter.go | 88 ++ cmd/root.go | 33 + go.mod | 13 + go.sum | 33 + gql.go | 7 + pkg/compare/compare.go | 1302 +++++++++++++++++ pkg/compare/compare_test.go | 1541 +++++++++++++++++++++ pkg/compare/test_schema/newSchema.graphql | 17 + pkg/compare/test_schema/oldSchema.graphql | 16 + pkg/linter/lint_error.go | 41 + pkg/linter/rules.go | 496 +++++++ pkg/linter/rules_test.go | 711 ++++++++++ pull_request_template.md | 30 + question.md | 25 + utils/util.go | 57 + 28 files changed, 5937 insertions(+) create mode 100644 .gitignore create mode 100644 CHANGELOG.md create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 README.md create mode 100644 bug_report.md create mode 100644 cmd/compare/compare_cmd.go create mode 100644 cmd/linter/inline_lint_config.go create mode 100644 cmd/linter/lexer/README.md create mode 100644 cmd/linter/lexer/blockstring.go create mode 100644 cmd/linter/lexer/lexer.go create mode 100644 cmd/linter/lexer/token.go create mode 100644 cmd/linter/lint_cmd.go create mode 100644 cmd/linter/linter.go create mode 100644 cmd/root.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 gql.go create mode 100644 pkg/compare/compare.go create mode 100644 pkg/compare/compare_test.go create mode 100644 pkg/compare/test_schema/newSchema.graphql create mode 100644 pkg/compare/test_schema/oldSchema.graphql create mode 100644 pkg/linter/lint_error.go create mode 100644 pkg/linter/rules.go create mode 100644 pkg/linter/rules_test.go create mode 100644 pull_request_template.md create mode 100644 question.md create mode 100644 utils/util.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c4bfbff --- /dev/null +++ b/.gitignore @@ -0,0 +1,72 @@ +#src/bitbucket.org +#src/github.com +#src/code.google.com + +# Web assests +web/ +web.tar.gz + +# IntelliJ file +.idea +go.iml +*.test +*.iml + +# Visual Studio Code files +.vscode +.favorites.json + +# Eclipse files +.project +.settings + +# Stupid OSX +._.DS_Store +.DS_Store + +# Hide fuse file for sshfs mounting from OSX +.fuse_* + +# debian packages +*.deb + +# Python +*.pyc + +# Log files +*.log + +# Distribution files +dist/ + +# Binary files +bin/ + +# Sonarqube +.scannerwork/ + +# Test coverage +*.coverprofile +coverage.out +coverage.stack +coverage.html +coverage.xml + +# Unit test +*checkstyle.xml +*junit.xml +report.json + +# Snyk +snyk.json + +# Crash log files +crash.log + +# Misc. +/environ.rc* +scrap*.* + +# go module workspace file +go.work + diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..08d6fbb --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,90 @@ +# Changelog +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+
+
+
+## v0.11.0 (2022-08-04)
+
+### Bug Fixes
+
+* **compare**: fixed the case where a type can have repetitive directives (b146008) sonam.pahariya@crowdstrike.com
+
+### Chores
+
+* **dependency**: updating gqlparser version (6c5f466) suraj.chafle@crowdstrike.com
+
+
+
+## v0.10.0 (2022-07-27)
+
+### Bug Fixes
+
+* **make**: add 0 length to make to create slice (cc070c7) ECOMM-2581 steven.klassen@crowdstrike.com
+
+
+
+## v0.9.0 (2022-07-07)
+
+### Refactors
+
+* **common**: Update linter command to use common code (e129832) sonam.pahariya@crowdstrike.com
+
+
+
+## v0.8.0 (2022-06-28)
+
+### Bug Fixes
+
+* **compare**: error out if there are breaking changes in schema compare (81abb2f) sonam.pahariya@crowdstrike.com
+
+
+
+## v0.7.0 (2022-06-28)
+
+### Features
+
+* **compare**: Moved schema compare code from graphqlcompare repo (df92cb3) sonam.pahariya@crowdstrike.com
+
+
+
+## v0.6.0 (2022-06-27)
+
+### Bug Fixes
+
+* **rules**: fix the line number bug (ed88acd) suraj.chafle@crowdstrike.com
+
+
+
+## v0.5.0 (2022-06-25)
+
+### Features
+
+* **linter**: support for wildcards in schema file path (3e333b2) suraj.chafle@crowdstrike.com
+
+
+
+## v0.4.0 (2022-06-21)
+
+### Refactors
+
+* **linter**: moving code around to support multiple subcommands (2f94b9b) suraj.chafle@crowdstrike.com
+
+
+
+## v0.3.0 (2022-06-16)
+
+### Features
+
+* **ui**: ability to enable/disable part of schema for linting (c3369e8) suraj.chafle@crowdstrike.com
+
+
+
+## v0.2.0 (2022-05-26)
+
+### Features
+
+* **ui**: added help, users now can choose the rules to be applied; updated readme (58fc89f) suraj.chafle@crowdstrike.com
+
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..c93ef8e
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,85 @@
+# Contributing
+
+_Welcome!_ We're excited you want to take part in the CrowdStrike community!
+
+Please review this document for details regarding getting started with your first contribution, tools
+you'll need to install as a developer, and our development and Pull Request process. If you have any
+questions, please let us know by posting your question in the [discussion board](https://github.com/CrowdStrike/gql/discussions).
+
+## Table of Contents
+
+- [Code of Conduct](#code-of-conduct)
+- [How you can contribute](#how-you-can-contribute)
+- [Pull Requests](#pull-requests)
+  - [License](#license)
+  - [Breaking changes](#breaking-changes)
+  - [Code Coverage](#code-coverage)
+  - [Commit Messages](#commit-message-formatting-and-hygiene)
+  - [Pull Request template](#pull-request-template)
+  - [Approval / Merging](#approval--merging)
+
+## Code of Conduct
+
+Please refer to CrowdStrike's general [Code of Conduct](https://opensource.crowdstrike.com/code-of-conduct/)
+and [contribution guidelines](https://opensource.crowdstrike.com/contributing/).
+
+## How you can contribute
+
+- See something? Say something! Submit a [bug report](https://github.com/CrowdStrike/gql/issues/new?assignees=&labels=bug%2Ctriage&template=bug.md&title=) to let the community know what you've experienced or found.
+  - Please propose new features on the discussion board first.
+- Join the [discussion board](https://github.com/CrowdStrike/gql/discussions) where you can:
+  - [Interact](https://github.com/CrowdStrike/gql/discussions/categories/general) with other members of the community
+  - [Start a discussion](https://github.com/CrowdStrike/gql/discussions/categories/ideas) or submit a [feature request](https://github.com/CrowdStrike/gql/issues/new?assignees=&labels=enhancement%2Ctriage&template=feature_request.md&title=)
+  - Provide [feedback](https://github.com/CrowdStrike/gql/discussions/categories/q-a)
+  - [Show others](https://github.com/CrowdStrike/gql/discussions/categories/show-and-tell) how you are using `gql` today
+- Submit a [Pull Request](#pull-requests)
+
+## Pull Requests
+
+All code changes should be submitted via a Pull Request targeting the `main` branch. We are not assuming
+that every merged PR creates a release, so we will not be automatically creating new SemVer tags as
+a side effect of merging your Pull Request. Instead, we will manually tag new releases when required.
+
+### License
+When you submit code changes, your submissions are understood to be under the same MIT [license](LICENSE) that covers the project.
+If this is a concern, contact the maintainers before contributing.
+
+### Breaking changes
+In an effort to maintain backwards compatibility, we thoroughly unit test every Pull Request for all
+versions of Go we support. These unit tests are intended to catch general programmatic errors,
+possible vulnerabilities and _potential breaking changes_.
+
+Please fully document unit testing performed within your Pull Request. If you did not specify "Breaking Change" on the
+punch list in the description, and the change is identified as possibly breaking, this may delay or prevent approval of your PR.
+
+### Code Coverage
+
+While we feel like achieving and maintaining 100% code coverage is often an untenable goal with
+diminishing returns, any changes that reduce code coverage will receive pushback. We don't want
+people to spend days trying to bump coverage from 97% to 98%, often at the expense of code clarity,
+but that doesn't mean that we're okay with making things worse.
+
+### Commit Message Formatting and Hygiene
+
+We use [_Conventional Commits_](https://www.conventionalcommits.org/en/v1.0.0/) formatting for commit
+messages, which we feel leads to a much more informative change history. Please familiarize yourself
+with that specification and format your commit messages accordingly.
+
+Another aspect of achieving a clean, informative commit history is to avoid "noise" in commits.
+Ideally, condense your changes to a single commit with a well-written _Conventional Commits_ message
+before submitting a PR. In the rare case that a single PR is introducing more than one change, each
+change should be a single commit with its own well-written message.
+
+### Pull Request template
+Please use the pull request template provided, making sure the following details are included in your request:
++ Is this a breaking change?
++ Are all new or changed code paths covered by unit testing?
++ A complete listing of issues addressed or closed with this change.
++ A complete listing of any enhancements provided by this change.
++ Any usage details developers may need to make use of this new functionality.
+  - Does additional documentation need to be developed beyond what is listed in your Pull Request?
++ Any other salient points of interest.
+
+### Approval / Merging
+All Pull Requests must be approved by at least one maintainer.
Once approved, a maintainer will perform the merge and execute any backend
+processes related to package deployment. At this time, contributors _do not_ have the ability to merge to the `main` branch.
\ No newline at end of file
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..4c89334
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,19 @@
+Copyright (c) 2022 CrowdStrike, Inc.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..77293ab
--- /dev/null
+++ b/README.md
@@ -0,0 +1,215 @@
+# gql
+`gql` is a collection of tools to manage GraphQL schemas. It can be used to lint a schema and to find breaking changes between two versions of a schema.
+
+## Usage
+`gql` can be installed with `go install github.com/CrowdStrike/gql@latest`
+```shell
+~ $ gql -h
+gql is a CLI built for federated GraphQL services' schemas
+
+Usage:
+  gql [command]
+
+Available Commands:
+  compare     compare two graphql schemas
+  help        Help about any command
+  lint        lints given GraphQL schema
+
+Flags:
+  -h, --help   help for gql
+
+Use "gql [command] --help" for more information about a command.
+```
+
+## linter
+The linter is inspired by [graphql-schema-linter](https://github.com/cjoudrey/graphql-schema-linter/) with changes to support Apollo federation.
+
+### How to use?
+The help command prints a help message listing all the supported rules.
+```shell +~ $ gql lint -h +lints given GraphQL schema + +Usage: + gql lint [flags] + +Flags: + -f, --filepath string Path to your GraphQL schema + -h, --help help for lint + -r, --rules strings Rules you want linter to use e.g.(-r type-desc,field-desc); available rules: + type-desc => type-desc checks whether all the types defined have description + args-desc => args-desc checks whether arguments have description + field-desc => field-desc checks whether fields have description + enum-caps => enum-caps checks whether Enum values are all UPPER_CASE + enum-desc => enum-desc checks whether Enum values have description + field-camel => field-camel checks whether fields defined are all camelCase + type-caps => type-caps checks whether types defined are Capitalized + relay-conn-type => relay-conn-type checks if Connection Types follow the Relay Cursor Connections Specification + relay-conn-args => relay-conn-args checks if Connection Args follow of the Relay Cursor Connections Specification +``` +Specifying the schema file: +```shell +~ $ gql lint -f schema.graphqls +2022/05/28 12:53:01 16 errors occurred: + * 6:3 field Todo.id does not have description + * 7:3 field Todo.text does not have description + * 8:3 field Todo.done does not have description + * 9:3 field Todo.user does not have description + * 13:3 field User.id does not have description + * 14:3 field User.todos does not have description + * 14:9 argument User.todos.offset does not have description + * 14:21 argument User.todos.limir does not have description + * 15:3 field User.color does not have description + * 19:3 field Query.todos does not have description + * 19:9 argument Query.todos.offset does not have description + * 19:22 argument Query.todos.limit does not have description + * 23:3 field NewTodo.text does not have description + * 24:3 field NewTodo.userId does not have description + * 28:3 field Mutation.createTodo does not have description + * 28:14 argument Mutation.createTodo.input does not have description +``` + +Reading schema from stdin: +```shell +~ $ cat schema.graphqls | gql lint +2022/05/28 12:54:48 16 errors occurred: + * 6:3 field Todo.id does not have description + * 7:3 field Todo.text does not have description + * 8:3 field Todo.done does not have description + * 9:3 field Todo.user does not have description + * 13:3 field User.id does not have description + * 14:3 field User.todos does not have description + * 14:9 argument User.todos.offset does not have description + * 14:21 argument User.todos.limir does not have description + * 15:3 field User.color does not have description + * 19:3 field Query.todos does not have description + * 19:9 argument Query.todos.offset does not have description + * 19:22 argument Query.todos.limit does not have description + * 23:3 field NewTodo.text does not have description + * 24:3 field NewTodo.userId does not have description + * 28:3 field Mutation.createTodo does not have description + * 28:14 argument Mutation.createTodo.input does not have description +``` +Passing subset of rules to be applied +```shell +~ $ gql lint -f schema.graphqls -r types-have-description +2022/05/28 13:23:20 1 error occurred: + * 3 errors occurred: + * 5:6 type Todo does not have description + * 22:7 type NewTodo does not have description + * 27:6 type Mutation does not have description +``` +Specifying wildcards for schema file paths +```shell +graphql-linter -f '*.graphqls' -r types-have-description +2022/05/28 13:23:20 1 error occurred: + * 3 errors occurred: + * 5:6 type Todo does not 
have description
+ * 22:7 type NewTodo does not have description
+ * 27:6 type Mutation does not have description
+```
+
+> Note: If your argument has wildcards, your shell may expand the glob itself and pass individual file paths to graphql-linter,
+> so don't forget the quotes around paths with wildcards.
+
+## Available rules
+The following table describes all the lint rules supported by the linter:
+
+| Lint Rule | Description |
+| :-------------: |:--------------|
+| type-desc | type-desc checks whether all the types defined have description |
+| args-desc | args-desc checks whether arguments have description |
+| field-desc | field-desc checks whether fields have description |
+| enum-caps | enum-caps checks whether enum values are all UPPER_CASE |
+| enum-desc | enum-desc checks whether enum values have description |
+| field-camel | field-camel checks whether fields defined are all camelCase |
+| type-caps | type-caps checks whether types defined are Capitalized |
+| relay-conn-type | relay-conn-type checks whether types defined are following relay cursor connection spec |
+| relay-conn-args | relay-conn-args checks whether args defined are following relay cursor connection spec |
+
+
+### Enabling and disabling certain rules
+You may want certain parts of your schema to be ignored during linting. You can do this with comments in your schema:
+
+#### ignoring multiple lines:
+If you use `#lint-disable rule1` on line x and `#lint-enable rule1` on line y, where x < y, then errors reported by rule1
+between lines x and y will be omitted from the output.
+
+#### ignoring single line:
+You can use `#lint-disable-line rule1` to disable rule1 for a specific line. This is only applicable if rule1 is applied
+to the rest of the schema. If rule1 is not among the rules passed, this has no effect on the output.
+
+## compare
+The compare command compares two schema files and returns all the differences. It is also built to support the Apollo federation
+specification and can be used to find breaking changes in a schema.
+
+### How to use?
+```shell
+~ $ gql compare -h
+compare two graphql schemas
+
+Usage:
+  gql compare [flags]
+
+Flags:
+  -b, --breaking-change-only     Get breaking change only
+  -e, --exclude-print-filepath   Exclude printing schema filepath positions
+  -h, --help                     help for compare
+  -n, --newversion string        Path to your new version of GraphQL schema
+  -o, --oldversion string        Path to your older version of GraphQL schema
+```
+
+Compare the schemas:
+```shell
+~ $ gql compare -o oldSchema.graphql -n newSchema.graphql
+❌ /Users/spahariya/code/temp/newSchema/library.graphql:2 Argument 'from' type changed from 'String' to 'String!' in directive '@transform'
+❌ /Users/spahariya/code/temp/newSchema/library.graphql:20 Field 'Book.title' type changed from 'String!' to 'String' in OBJECT
+❌ /Users/spahariya/code/temp/newSchema/library.graphql:18 Field 'Book.year' was removed from OBJECT
+❌ /Users/spahariya/code/temp/newSchema/user.graphql:12 Input field 'UserInput.adBooks' type changed from '[Book]' to '[Book]!' in input object type
+❌ /Users/spahariya/code/temp/newSchema/user.graphql:11 Input field 'UserInput.newBooks' type changed from '[Book]!' to '[Book!]!' in input object type
+✋ /Users/spahariya/code/temp/newSchema/library.graphql:4 Member 'text' was added to Union type 'body'
+✅ /Users/spahariya/code/temp/newSchema/library.graphql:19 Field 'Book.isbn' type changed from 'String' to 'String!' in OBJECT
+✅ /Users/spahariya/code/temp/newSchema/library.graphql:15 Field 'Library.books' was added to OBJECT
+✅ /Users/spahariya/code/temp/newSchema/library.graphql:8 Field 'Query.books' was added to OBJECT
+✅ /Users/spahariya/code/temp/newSchema/user.graphql:6 Field 'User.address' was added to OBJECT
+✅ /Users/spahariya/code/temp/newSchema/user.graphql:5 Field 'User.name' type changed from 'String' to 'String!' in OBJECT
+Breaking changes in schema: 5
+```
+If you want to exclude schema filepath positions from the output, pass the -e or --exclude-print-filepath option with the command:
+```shell
+~ $ gql compare -o "oldGraphql/*.graphql" -n "newGraphql/*.graphql" -e
+❌ Field 'Book.isbn' type changed from 'String!' to 'String' in OBJECT
+❌ Field 'Library.books' was removed from OBJECT
+❌ Field 'Query.books' was removed from OBJECT
+✋ Member 'text' was added to Union type 'body'
+✅ Argument 'from' type changed from 'String!' to 'String' in directive '@transform'
+✅ Field 'Book.title' type changed from 'String' to 'String!' in OBJECT
+✅ Field 'Book.year' was added to OBJECT
+Breaking changes in schema: 3
+```
+
+## Type of changes in schema
+Generally speaking, a change either breaks the API contract with clients or it doesn't. In GraphQL, however, there is another category of changes
+that won't actually break clients but will change their behavior and, if not handled properly in client code, will cause client-side errors. Developers therefore need
+to pay special attention not only to the breaking changes but also to the dangerous ones. The following is the list of breaking and dangerous changes that can be made
+in a GraphQL schema.
+
+* **Breaking❌**: Changes that will break existing queries to the GraphQL API. Below are the types of changes that fall into the breaking change category.
+  - Schema root operation type (`Query`, `Mutation`, `Subscription`) changed or removed
+  - `type`/`field`/`directive`/`interface` removed
+  - `type` kind changed
+  - `directive` location changed
+  - `field` type changed or made optional
+  - `input` type changed or made required or required fields added
+  - argument type changed or removed or made required or required arguments added
+  - `enum` value removed
+  - Union member removed
+
+
+* **Dangerous✋**: Changes that won't break existing queries but could affect the runtime behavior of clients. Below are the types of changes that fall into the dangerous change category.
+  - Argument default value changed
+  - `directive` optional argument added
+  - Deprecation added/removed to `field`/`enum` values
+  - `enum` value added
+  - Interfaces added to an object's list of implemented interfaces
+  - Union member added
+  - Input fields added
\ No newline at end of file
diff --git a/bug_report.md b/bug_report.md
new file mode 100644
index 0000000..733c31f
--- /dev/null
+++ b/bug_report.md
@@ -0,0 +1,27 @@
+---
+name: Bug report
+about: Create a report to help `gql` improve.
+title: "[ BUG ] "
+labels: bug
+assignees: 'schafle'
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**To Reproduce**
+Steps to reproduce the behavior.
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Environment (please complete the following information):**
+ - OS: [e.g. Windows Server 2016, Windows 10]
+ - golang: [e.g. 1.17]
+ - gql: [e.g. 0.1.0]
+
+**Additional context**
+Add any other context about the problem here.
+
+**Transcript content**
+If possible, please include the output of the `gql` CLI.
\ No newline at end of file
diff --git a/cmd/compare/compare_cmd.go b/cmd/compare/compare_cmd.go
new file mode 100644
index 0000000..efc723c
--- /dev/null
+++ b/cmd/compare/compare_cmd.go
@@ -0,0 +1,89 @@
+package compare
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/spf13/cobra"
+
+	"github.com/CrowdStrike/gql/pkg/compare"
+	"github.com/CrowdStrike/gql/utils"
+)
+
+var (
+	oldSchemaPath      string
+	newSchemaPath      string
+	onlyBreakingChange bool
+	excludeFilePath    bool
+)
+
+// NewCompareCmd creates a new compare command
+func NewCompareCmd() *cobra.Command {
+	compareCmd := &cobra.Command{
+		Use:   "compare",
+		Short: "compare two graphql schemas",
+		Long:  "compare two graphql schemas",
+		Run: func(cmd *cobra.Command, args []string) {
+			if len(oldSchemaPath) == 0 || len(newSchemaPath) == 0 {
+				fmt.Print("compare expects two versions of the schema as arguments\n")
+				os.Exit(1)
+			}
+			if oldSchemaPath == newSchemaPath {
+				fmt.Printf("Both old '%s' and new '%s' schema paths are the same\n", oldSchemaPath, newSchemaPath)
+				os.Exit(1)
+			}
+
+			schemaOldContents, err := utils.ReadFiles(oldSchemaPath)
+			if err != nil {
+				fmt.Printf("failed to read schema files on filepath:%s, error:%v", oldSchemaPath, err)
+				os.Exit(1)
+			}
+
+			schemaNewContents, err := utils.ReadFiles(newSchemaPath)
+			if err != nil {
+				fmt.Printf("failed to read schema files on filepath:%s, error:%v", newSchemaPath, err)
+				os.Exit(1)
+			}
+
+			schemaOld, parseErr := utils.ParseSchema(schemaOldContents)
+			if parseErr != nil {
+				fmt.Printf("Error parsing schema content on path=%s, error:%v", oldSchemaPath, parseErr)
+				os.Exit(1)
+			}
+
+			schemaNew, parseErr := utils.ParseSchema(schemaNewContents)
+			if parseErr != nil {
+				fmt.Printf("Error parsing schema content on path=%s, error:%v", newSchemaPath, parseErr)
+				os.Exit(1)
+			}
+			exitStatus := 0
+			changes := compare.FindChangesInSchemas(schemaOld, schemaNew)
+			if len(changes) == 0 {
+				fmt.Println("No changes found on schema compare!")
+			} else {
+				changeCriticalityMap := compare.GroupChanges(changes)
+				errorCount := 0
+				// print changes
+				if onlyBreakingChange {
+					errorCount = compare.ReportBreakingChanges(changeCriticalityMap[compare.Breaking], !excludeFilePath)
+				} else {
+					errorCount = compare.ReportBreakingChanges(changeCriticalityMap[compare.Breaking], !excludeFilePath)
+					compare.ReportDangerousChanges(changeCriticalityMap[compare.Dangerous], !excludeFilePath)
+					compare.ReportNonBreakingChanges(changeCriticalityMap[compare.NonBreaking], !excludeFilePath)
+				}
+				if errorCount == 0 {
+					fmt.Println("No breaking changes found 🎉")
+				} else {
+					fmt.Printf("\n❌ Breaking changes in schema: %d\n", errorCount)
+					exitStatus |= 1
+				}
+			}
+			os.Exit(exitStatus)
+		},
+	}
+	compareCmd.PersistentFlags().StringVarP(&oldSchemaPath, "oldversion", "o", "", "Path to your older version of GraphQL schema")
+	compareCmd.PersistentFlags().StringVarP(&newSchemaPath, "newversion", "n", "", "Path to your new version of GraphQL schema")
+	compareCmd.PersistentFlags().BoolVarP(&onlyBreakingChange, "breaking-change-only", "b", false, "Get breaking change only")
+	compareCmd.PersistentFlags().BoolVarP(&excludeFilePath, "exclude-print-filepath", "e", false, "Exclude printing schema filepath positions")
+	return compareCmd
+}
diff --git a/cmd/linter/inline_lint_config.go b/cmd/linter/inline_lint_config.go
new file mode 100644
index 0000000..dd85f1c
--- /dev/null
+++ b/cmd/linter/inline_lint_config.go
@@ -0,0 +1,92 @@
+package linter
+
+import (
+	"strings"
+
+	"github.com/CrowdStrike/gql/cmd/linter/lexer"
+
+	"github.com/vektah/gqlparser/v2/ast"
+)
+
+type inlineLintConfigMetadata struct {
+	pos   ast.Position
+	value string
+}
+
+const (
+	lintDisable     lintCommand = "lint-disable"
+	lintEnable      lintCommand = "lint-enable"
+	lintDisableLine lintCommand = "lint-disable-line"
+)
+
+func extractInlineLintConfiguration(source *ast.Source) []InlineLintConfig {
+	inlineLintConfigs := make([]inlineLintConfigMetadata, 0)
+
+	s := lexer.New(source)
+
+	token, err := s.ReadToken()
+	if err != nil { // If we reach here, the lexer should not return an error
+		panic(err)
+	}
+
+	for {
+		if token.Kind == lexer.Comment && strings.HasPrefix(token.Value, "#lint-") {
+			inlineLintConfigs = append(inlineLintConfigs, inlineLintConfigMetadata{
+				pos:   token.Pos,
+				value: token.Value,
+			})
+		}
+		token, err = s.ReadToken()
+		if err != nil { // If we reach here, the lexer should not return an error
+			panic(err)
+		}
+		if token.Kind == lexer.EOF {
+			break
+		}
+	}
+
+	inlineLintConfigRules := make([]InlineLintConfig, 0, len(inlineLintConfigs))
+
+	// Now that we know which lines have lint configs in comments, let's parse them
+	for _, linConfig := range inlineLintConfigs {
+		matchGroups := inlineLintConfigurationRegex.FindStringSubmatch(linConfig.value)
+		switch matchGroups[1] {
+		case string(lintDisable):
+			rulesString := matchGroups[2]
+			rulesToApply := sanitizeRules(rulesString)
+			inlineRule := InlineLintConfig{
+				command: lintDisable,
+				rules:   rulesToApply,
+				pos:     linConfig.pos.Line,
+			}
+			inlineLintConfigRules = append(inlineLintConfigRules, inlineRule)
+		case string(lintEnable):
+			rulesString := matchGroups[2]
+			rulesToApply := sanitizeRules(rulesString)
+			inlineRule := InlineLintConfig{
+				command: lintEnable,
+				rules:   rulesToApply,
+				pos:     linConfig.pos.Line,
+			}
+			inlineLintConfigRules = append(inlineLintConfigRules, inlineRule)
+		case string(lintDisableLine):
+			rulesString := matchGroups[2]
+			rulesToApply := sanitizeRules(rulesString)
+			inlineRule := InlineLintConfig{
+				command: lintDisableLine,
+				rules:   rulesToApply,
+				pos:     linConfig.pos.Line,
+			}
+			inlineLintConfigRules = append(inlineLintConfigRules, inlineRule)
+		}
+	}
+	return inlineLintConfigRules
+}
+
+func sanitizeRules(rulesString string) []string {
+	rulesToApply := make([]string, 0)
+	for _, rule := range strings.Split(rulesString, ",") {
+		rulesToApply = append(rulesToApply, strings.TrimSpace(rule))
+	}
+	return rulesToApply
+}
diff --git a/cmd/linter/lexer/README.md b/cmd/linter/lexer/README.md
new file mode 100644
index 0000000..8864594
--- /dev/null
+++ b/cmd/linter/lexer/README.md
@@ -0,0 +1,6 @@
+We are using [gqlparser](https://github.com/vektah/gqlparser) for parsing GraphQL schema.
+The lexer in gqlparser drops all comments when it returns the AST. Since we are using comments to enable/disable lint rules,
+we are using a slightly modified lexer from gqlparser (the only change being that it does not drop comments).
+
+If gqlparser supports this functionality in the future, or there is another lexer that can give us access to the comments in a schema,
+we will remove this copied code.
\ No newline at end of file
diff --git a/cmd/linter/lexer/blockstring.go b/cmd/linter/lexer/blockstring.go
new file mode 100644
index 0000000..4065a61
--- /dev/null
+++ b/cmd/linter/lexer/blockstring.go
@@ -0,0 +1,58 @@
+package lexer
+
+import (
+	"math"
+	"strings"
+)
+
+// blockStringValue produces the value of a block string from its parsed raw value, similar to
+// CoffeeScript's block strings, Python's docstring trim, or Ruby's strip_heredoc.
+// +// This implements the GraphQL spec's BlockStringValue() static algorithm. +func blockStringValue(raw string) string { + lines := strings.Split(raw, "\n") + + commonIndent := math.MaxInt32 + for _, line := range lines { + indent := leadingWhitespace(line) + if indent < len(line) && indent < commonIndent { + commonIndent = indent + if commonIndent == 0 { + break + } + } + } + + if commonIndent != math.MaxInt32 && len(lines) > 0 { + for i := 1; i < len(lines); i++ { + if len(lines[i]) < commonIndent { + lines[i] = "" + } else { + lines[i] = lines[i][commonIndent:] + } + } + } + + start := 0 + end := len(lines) + + for start < end && leadingWhitespace(lines[start]) == math.MaxInt32 { + start++ + } + + for start < end && leadingWhitespace(lines[end-1]) == math.MaxInt32 { + end-- + } + + return strings.Join(lines[start:end], "\n") +} + +func leadingWhitespace(str string) int { + for i, r := range str { + if r != ' ' && r != '\t' { + return i + } + } + // this line is made up entirely of whitespace, its leading whitespace doesnt count. + return math.MaxInt32 +} diff --git a/cmd/linter/lexer/lexer.go b/cmd/linter/lexer/lexer.go new file mode 100644 index 0000000..8b5ce8d --- /dev/null +++ b/cmd/linter/lexer/lexer.go @@ -0,0 +1,515 @@ +// nolint: revive,goconst,gocritic +package lexer + +import ( + "bytes" + "unicode/utf8" + + "github.com/vektah/gqlparser/v2/ast" + "github.com/vektah/gqlparser/v2/gqlerror" +) + +// Lexer turns graphql request and schema strings into tokens +type Lexer struct { + *ast.Source + // An offset into the string in bytes + start int + // An offset into the string in runes + startRunes int + // An offset into the string in bytes + end int + // An offset into the string in runes + endRunes int + // the current line number + line int + // An offset into the string in rune + lineStartRunes int +} + +func New(src *ast.Source) Lexer { + return Lexer{ + Source: src, + line: 1, + } +} + +// take one rune from input and advance end +func (s *Lexer) peek() (rune, int) { + return utf8.DecodeRuneInString(s.Input[s.end:]) +} + +func (s *Lexer) makeToken(kind Type) (Token, error) { + return s.makeValueToken(kind, s.Input[s.start:s.end]) +} + +func (s *Lexer) makeValueToken(kind Type, value string) (Token, error) { + return Token{ + Kind: kind, + Value: value, + Pos: ast.Position{ + Start: s.startRunes, + End: s.endRunes, + Line: s.line, + Column: s.startRunes - s.lineStartRunes + 1, + Src: s.Source, + }, + }, nil +} + +func (s *Lexer) makeError(format string, args ...interface{}) (Token, error) { + column := s.endRunes - s.lineStartRunes + 1 + return Token{ + Kind: Invalid, + Pos: ast.Position{ + Start: s.startRunes, + End: s.endRunes, + Line: s.line, + Column: column, + Src: s.Source, + }, + }, gqlerror.ErrorLocf(s.Source.Name, s.line, column, format, args...) +} + +// ReadToken gets the next token from the source starting at the given position. +// +// This skips over whitespace and comments until it finds the next lexable +// token, then lexes punctuators immediately or calls the appropriate helper +// function for more complicated tokens. 
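+//
+// Offsets are tracked in both bytes (start/end) and runes (startRunes/endRunes)
+// so that reported token positions stay correct for multi-byte UTF-8 input.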
+func (s *Lexer) ReadToken() (token Token, err error) { + + s.ws() + s.start = s.end + s.startRunes = s.endRunes + + if s.end >= len(s.Input) { + return s.makeToken(EOF) + } + r := s.Input[s.start] + s.end++ + s.endRunes++ + switch r { + case '!': + return s.makeValueToken(Bang, "") + + case '$': + return s.makeValueToken(Dollar, "") + case '&': + return s.makeValueToken(Amp, "") + case '(': + return s.makeValueToken(ParenL, "") + case ')': + return s.makeValueToken(ParenR, "") + case '.': + if len(s.Input) > s.start+2 && s.Input[s.start:s.start+3] == "..." { + s.end += 2 + s.endRunes += 2 + return s.makeValueToken(Spread, "") + } + case ':': + return s.makeValueToken(Colon, "") + case '=': + return s.makeValueToken(Equals, "") + case '@': + return s.makeValueToken(At, "") + case '[': + return s.makeValueToken(BracketL, "") + case ']': + return s.makeValueToken(BracketR, "") + case '{': + return s.makeValueToken(BraceL, "") + case '}': + return s.makeValueToken(BraceR, "") + case '|': + return s.makeValueToken(Pipe, "") + case '#': + return s.readComment() + case '_', 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z': + return s.readName() + + case '-', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return s.readNumber() + + case '"': + if len(s.Input) > s.start+2 && s.Input[s.start:s.start+3] == `"""` { + return s.readBlockString() + } + + return s.readString() + } + + s.end-- + s.endRunes-- + + if r < 0x0020 && r != 0x0009 && r != 0x000a && r != 0x000d { + return s.makeError(`Cannot contain the invalid character "\u%04d"`, r) + } + + if r == '\'' { + return s.makeError(`Unexpected single quote character ('), did you mean to use a double quote (")?`) + } + + return s.makeError(`Cannot parse the unexpected character "%s".`, string(r)) +} + +// ws reads from body starting at startPosition until it finds a non-whitespace +// or commented character, and updates the token end to include all whitespace +func (s *Lexer) ws() { + for s.end < len(s.Input) { + switch s.Input[s.end] { + case '\t', ' ', ',': + s.end++ + s.endRunes++ + case '\n': + s.end++ + s.endRunes++ + s.line++ + s.lineStartRunes = s.endRunes + case '\r': + s.end++ + s.endRunes++ + s.line++ + s.lineStartRunes = s.endRunes + // skip the following newline if its there + if s.end < len(s.Input) && s.Input[s.end] == '\n' { + s.end++ + s.endRunes++ + } + // byte order mark, given ws is hot path we aren't relying on the unicode package here. + case 0xef: + if s.end+2 < len(s.Input) && s.Input[s.end+1] == 0xBB && s.Input[s.end+2] == 0xBF { + s.end += 3 + s.endRunes++ + } else { + return + } + default: + return + } + } +} + +// readComment from the input +// +// #[\u0009\u0020-\uFFFF]* +func (s *Lexer) readComment() (Token, error) { + for s.end < len(s.Input) { + r, w := s.peek() + + // SourceCharacter but not LineTerminator + if r > 0x001f || r == '\t' { + s.end += w + s.endRunes++ + } else { + break + } + } + + return s.makeToken(Comment) +} + +// readNumber from the input, either a float +// or an int depending on whether a decimal point appears. +// +// Int: -?(0|[1-9][0-9]*) +// Float: -?(0|[1-9][0-9]*)(\.[0-9]+)?((E|e)(+|-)?[0-9]+)? 
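+//
+// ReadToken has already consumed the first digit (or the minus sign), so
+// readNumber backs up one byte before scanning the whole literal.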
+func (s *Lexer) readNumber() (Token, error) {
+	float := false
+
+	// backup to the first digit
+	s.end--
+	s.endRunes--
+
+	s.acceptByte('-')
+
+	if s.acceptByte('0') {
+		if consumed := s.acceptDigits(); consumed != 0 {
+			s.end -= consumed
+			s.endRunes -= consumed
+			return s.makeError("Invalid number, unexpected digit after 0: %s.", s.describeNext())
+		}
+	} else {
+		if consumed := s.acceptDigits(); consumed == 0 {
+			return s.makeError("Invalid number, expected digit but got: %s.", s.describeNext())
+		}
+	}
+
+	if s.acceptByte('.') {
+		float = true
+
+		if consumed := s.acceptDigits(); consumed == 0 {
+			return s.makeError("Invalid number, expected digit but got: %s.", s.describeNext())
+		}
+	}
+
+	if s.acceptByte('e', 'E') {
+		float = true
+
+		s.acceptByte('-', '+')
+
+		if consumed := s.acceptDigits(); consumed == 0 {
+			return s.makeError("Invalid number, expected digit but got: %s.", s.describeNext())
+		}
+	}
+
+	if float {
+		return s.makeToken(Float)
+	} else {
+		return s.makeToken(Int)
+	}
+}
+
+// acceptByte advances past the next byte if it matches any of the given bytes,
+// returning true if it found a match
+func (s *Lexer) acceptByte(bytes ...uint8) bool {
+	if s.end >= len(s.Input) {
+		return false
+	}
+
+	for _, accepted := range bytes {
+		if s.Input[s.end] == accepted {
+			s.end++
+			s.endRunes++
+			return true
+		}
+	}
+	return false
+}
+
+// acceptDigits from the input, returning the number of digits it found
+func (s *Lexer) acceptDigits() int {
+	consumed := 0
+	for s.end < len(s.Input) && s.Input[s.end] >= '0' && s.Input[s.end] <= '9' {
+		s.end++
+		s.endRunes++
+		consumed++
+	}
+
+	return consumed
+}
+
+// describeNext peeks at the input and returns a human-readable string. This will allocate
+// and should only be used in errors
+func (s *Lexer) describeNext() string {
+	if s.end < len(s.Input) {
+		return `"` + string(s.Input[s.end]) + `"`
+	}
+	return "<EOF>"
+}
+
+// readString from the input
+//
+// "([^"\\\u000A\u000D]|(\\(u[0-9a-fA-F]{4}|["\\/bfnrt])))*"
+func (s *Lexer) readString() (Token, error) {
+	inputLen := len(s.Input)
+
+	// this buffer is lazily created only if there are escape characters.
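+	// When no escape sequence is encountered, the token value is sliced
+	// directly out of the input by makeToken and this buffer stays nil.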
+ var buf *bytes.Buffer + + // skip the opening quote + s.start++ + s.startRunes++ + + for s.end < inputLen { + r := s.Input[s.end] + if r == '\n' || r == '\r' { + break + } + if r < 0x0020 && r != '\t' { + return s.makeError(`Invalid character within String: "\u%04d".`, r) + } + switch r { + default: + var char = rune(r) + var w = 1 + + // skip unicode overhead if we are in the ascii range + if r >= 127 { + char, w = utf8.DecodeRuneInString(s.Input[s.end:]) + } + s.end += w + s.endRunes++ + + if buf != nil { + buf.WriteRune(char) + } + + case '"': + t, err := s.makeToken(String) + // the token should not include the quotes in its value, but should cover them in its position + t.Pos.Start-- + t.Pos.End++ + + if buf != nil { + t.Value = buf.String() + } + + // skip the close quote + s.end++ + s.endRunes++ + + return t, err + + case '\\': + if s.end+1 >= inputLen { + s.end++ + s.endRunes++ + return s.makeError(`Invalid character escape sequence.`) + } + + if buf == nil { + buf = bytes.NewBufferString(s.Input[s.start:s.end]) + } + + escape := s.Input[s.end+1] + + if escape == 'u' { + if s.end+6 >= inputLen { + s.end++ + s.endRunes++ + return s.makeError("Invalid character escape sequence: \\%s.", s.Input[s.end:]) + } + + r, ok := unhex(s.Input[s.end+2 : s.end+6]) + if !ok { + s.end++ + s.endRunes++ + return s.makeError("Invalid character escape sequence: \\%s.", s.Input[s.end:s.end+5]) + } + buf.WriteRune(r) + s.end += 6 + s.endRunes += 6 + } else { + switch escape { + case '"', '/', '\\': + buf.WriteByte(escape) + case 'b': + buf.WriteByte('\b') + case 'f': + buf.WriteByte('\f') + case 'n': + buf.WriteByte('\n') + case 'r': + buf.WriteByte('\r') + case 't': + buf.WriteByte('\t') + default: + s.end += 1 + s.endRunes += 1 + return s.makeError("Invalid character escape sequence: \\%s.", string(escape)) + } + s.end += 2 + s.endRunes += 2 + } + } + } + + return s.makeError("Unterminated string.") +} + +// readBlockString from the input +// +// """("?"?(\\"""|\\(?!=""")|[^"\\]))*""" +func (s *Lexer) readBlockString() (Token, error) { + inputLen := len(s.Input) + + var buf bytes.Buffer + + // skip the opening quote + s.start += 3 + s.startRunes += 3 + s.end += 2 + s.endRunes += 2 + + for s.end < inputLen { + r := s.Input[s.end] + + // Closing triple quote (""") + if r == '"' && s.end+3 <= inputLen && s.Input[s.end:s.end+3] == `"""` { + t, err := s.makeValueToken(BlockString, blockStringValue(buf.String())) + + // the token should not include the quotes in its value, but should cover them in its position + t.Pos.Start -= 3 + t.Pos.End += 3 + + // skip the close quote + s.end += 3 + s.endRunes += 3 + + return t, err + } + + // SourceCharacter + if r < 0x0020 && r != '\t' && r != '\n' && r != '\r' { + return s.makeError(`Invalid character within String: "\u%04d".`, r) + } + + if r == '\\' && s.end+4 <= inputLen && s.Input[s.end:s.end+4] == `\"""` { + buf.WriteString(`"""`) + s.end += 4 + s.endRunes += 4 + } else if r == '\r' { + if s.end+1 < inputLen && s.Input[s.end+1] == '\n' { + s.end++ + s.endRunes++ + } + + buf.WriteByte('\n') + s.end++ + s.endRunes++ + s.line++ + s.lineStartRunes = s.endRunes + } else { + var char = rune(r) + var w = 1 + + // skip unicode overhead if we are in the ascii range + if r >= 127 { + char, w = utf8.DecodeRuneInString(s.Input[s.end:]) + } + s.end += w + s.endRunes++ + buf.WriteRune(char) + if r == '\n' { + s.line++ + s.lineStartRunes = s.endRunes + } + } + } + + return s.makeError("Unterminated string.") +} + +func unhex(b string) (v rune, ok bool) { + for _, c := range 
b {
+		v <<= 4
+		switch {
+		case '0' <= c && c <= '9':
+			v |= c - '0'
+		case 'a' <= c && c <= 'f':
+			v |= c - 'a' + 10
+		case 'A' <= c && c <= 'F':
+			v |= c - 'A' + 10
+		default:
+			return 0, false
+		}
+	}
+
+	return v, true
+}
+
+// readName from the input
+//
+// [_A-Za-z][_0-9A-Za-z]*
+func (s *Lexer) readName() (Token, error) {
+	for s.end < len(s.Input) {
+		r, w := s.peek()
+
+		if (r >= '0' && r <= '9') || (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') || r == '_' {
+			s.end += w
+			s.endRunes++
+		} else {
+			break
+		}
+	}
+
+	return s.makeToken(Name)
+}
diff --git a/cmd/linter/lexer/token.go b/cmd/linter/lexer/token.go
new file mode 100644
index 0000000..fcb6b16
--- /dev/null
+++ b/cmd/linter/lexer/token.go
@@ -0,0 +1,149 @@
+// nolint: revive,goconst,gocritic
+package lexer
+
+import (
+	"strconv"
+
+	"github.com/vektah/gqlparser/v2/ast"
+)
+
+const (
+	Invalid Type = iota
+	EOF
+	Bang
+	Dollar
+	Amp
+	ParenL
+	ParenR
+	Spread
+	Colon
+	Equals
+	At
+	BracketL
+	BracketR
+	BraceL
+	BraceR
+	Pipe
+	Name
+	Int
+	Float
+	String
+	BlockString
+	Comment
+)
+
+func (t Type) Name() string {
+	switch t {
+	case Invalid:
+		return "Invalid"
+	case EOF:
+		return "EOF"
+	case Bang:
+		return "Bang"
+	case Dollar:
+		return "Dollar"
+	case Amp:
+		return "Amp"
+	case ParenL:
+		return "ParenL"
+	case ParenR:
+		return "ParenR"
+	case Spread:
+		return "Spread"
+	case Colon:
+		return "Colon"
+	case Equals:
+		return "Equals"
+	case At:
+		return "At"
+	case BracketL:
+		return "BracketL"
+	case BracketR:
+		return "BracketR"
+	case BraceL:
+		return "BraceL"
+	case BraceR:
+		return "BraceR"
+	case Pipe:
+		return "Pipe"
+	case Name:
+		return "Name"
+	case Int:
+		return "Int"
+	case Float:
+		return "Float"
+	case String:
+		return "String"
+	case BlockString:
+		return "BlockString"
+	case Comment:
+		return "Comment"
+	}
+	return "Unknown " + strconv.Itoa(int(t))
+}
+
+func (t Type) String() string {
+	switch t {
+	case Invalid:
+		return "<Invalid>"
+	case EOF:
+		return "<EOF>"
+	case Bang:
+		return "!"
+	case Dollar:
+		return "$"
+	case Amp:
+		return "&"
+	case ParenL:
+		return "("
+	case ParenR:
+		return ")"
+	case Spread:
+		return "..."
+	case Colon:
+		return ":"
+	case Equals:
+		return "="
+	case At:
+		return "@"
+	case BracketL:
+		return "["
+	case BracketR:
+		return "]"
+	case BraceL:
+		return "{"
+	case BraceR:
+		return "}"
+	case Pipe:
+		return "|"
+	case Name:
+		return "Name"
+	case Int:
+		return "Int"
+	case Float:
+		return "Float"
+	case String:
+		return "String"
+	case BlockString:
+		return "BlockString"
+	case Comment:
+		return "Comment"
+	}
+	return "Unknown " + strconv.Itoa(int(t))
+}
+
+// Type represents a type of token. The types are predefined as constants.
+type Type int
+
+type Token struct {
+	Kind  Type   // The token type.
+	Value string // The literal value consumed.
+	Pos   ast.Position // The file and line this token was read from
+}
+
+func (t Token) String() string {
+	if t.Value != "" {
+		return t.Kind.String() + " " + strconv.Quote(t.Value)
+	}
+	return t.Kind.String()
+}
diff --git a/cmd/linter/lint_cmd.go b/cmd/linter/lint_cmd.go
new file mode 100644
index 0000000..f7e3741
--- /dev/null
+++ b/cmd/linter/lint_cmd.go
@@ -0,0 +1,110 @@
+package linter
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"strings"
+
+	"github.com/CrowdStrike/gql/pkg/linter"
+
+	"github.com/CrowdStrike/gql/utils"
+
+	"github.com/spf13/cobra"
+)
+
+var (
+	schemaFilePath string
+	passedRules    []string
+)
+
+// NewLintCmd creates a new lint command
+func NewLintCmd() *cobra.Command {
+	// lintCmd represents the lint command
+	lintCmd := &cobra.Command{
+		Use:   "lint",
+		Short: "lints given GraphQL schema",
+		Long:  `lints given GraphQL schema`,
+		Run: func(cmd *cobra.Command, args []string) {
+			schemaFileContents := make(map[string][]byte)
+
+			rulesToApply, err := findTheRulesToApply(passedRules)
+			if err != nil {
+				fmt.Printf("failed to parse the rules to apply, rules=%s, error:%v", passedRules, err)
+				// Exit with error printed to stderr
+				os.Exit(1)
+			}
+
+			if len(schemaFilePath) == 0 {
+				content, err := io.ReadAll(os.Stdin)
+				if err != nil {
+					fmt.Printf("failed to read from stdin with error %v", err)
+					os.Exit(1)
+				}
+				schemaFileContents[os.Stdin.Name()] = content
+			} else {
+				var err error
+				schemaFileContents, err = utils.ReadFiles(schemaFilePath)
+				if err != nil {
+					fmt.Printf("failed to read schema files with error %v", err)
+					os.Exit(1)
+				}
+			}
+
+			exitStatus := 0
+			errorCount := 0
+			for filename, schemaFileContent := range schemaFileContents {
+				if lintErrors := Lint(filename, string(schemaFileContent), rulesToApply); len(lintErrors) != 0 {
+					errorCount += len(lintErrors)
+					errorPresenter(filename, lintErrors)
+					exitStatus |= 1 // If there's an error for any file, the exit code should be 1
+				}
+			}
+
+			if errorCount == 0 {
+				fmt.Printf("Schema has no lint errors! 🎉\n")
+			} else {
+				fmt.Printf("❌ Total lint errors found: %d\n", errorCount)
+			}
+
+			os.Exit(exitStatus) // 0 on success, 1 if any file had lint errors
+		},
+	}
+	lintCmd.PersistentFlags().StringVarP(&schemaFilePath, "filepath", "f", "", "Path to your GraphQL schema")
+	lintCmd.PersistentFlags().StringSliceVarP(&passedRules, "rules", "r", []string{}, fmt.Sprintf("Rules you want linter to use e.g.(-r type-desc,field-desc); available rules:\n %s", linter.AvailableRulesWithDescription()))
+	return lintCmd
+}
+
+func findTheRulesToApply(rulesString []string) ([]linter.LintRuleFunc, error) {
+	rulesToApply := make([]linter.LintRuleFunc, 0)
+	if len(rulesString) == 0 {
+		for _, rule := range linter.AllTheRules {
+			rulesToApply = append(rulesToApply, rule.RuleFunction)
+		}
+		return rulesToApply, nil
+	}
+	for _, ruleToken := range rulesString {
+		inputRuleName := strings.TrimSpace(ruleToken)
+		// Check whether the rule passed exists in our rule list
+		matchFound := false
+		for _, rule := range linter.AllTheRules {
+			if strings.EqualFold(inputRuleName, string(rule.Name)) {
+				matchFound = true
+				rulesToApply = append(rulesToApply, rule.RuleFunction)
+				break
+			}
+		}
+		if !matchFound {
+			return nil, fmt.Errorf("invalid rule[%s] passed", inputRuleName)
+		}
+	}
+	return rulesToApply, nil
+}
+
+func errorPresenter(schemaFilePath string, errors []linter.LintErrorWithMetadata) {
+	for _, err := range errors {
+		fmt.Printf("%s:%d:%d %s\n", schemaFilePath, err.Line, err.Column, err.Err.Error())
+	}
+	fmt.Println("") // separator between the outputs of individual files
+}
diff --git a/cmd/linter/linter.go b/cmd/linter/linter.go
new file mode 100644
index 0000000..76d679b
--- /dev/null
+++ b/cmd/linter/linter.go
@@ -0,0 +1,88 @@
+package linter
+
+import (
+	"fmt"
+	"os"
+	"regexp"
+	"strings"
+
+	"github.com/CrowdStrike/gql/pkg/linter"
+
+	"github.com/vektah/gqlparser/v2/ast"
+	"github.com/vektah/gqlparser/v2/parser"
+)
+
+var inlineLintConfigurationRegex = regexp.MustCompile(`^#\s*(lint-[^\s ]+)(\s.*)?$`)
+
+type lintCommand string
+
+// InlineLintConfig represents an inline config value defined in the schema for enabling/disabling certain rules for one or more lines
+type InlineLintConfig struct {
+	command lintCommand
+	rules   []string
+	pos     int
+}
+
+// Lint lints a federated GraphQL schema
+func Lint(fileName string, schemaFileContents string, rules []linter.LintRuleFunc) []linter.LintErrorWithMetadata {
+	source := &ast.Source{
+		Name:  fileName,
+		Input: schemaFileContents,
+	}
+	schema, parseErr := parser.ParseSchema(source)
+
+	if parseErr != nil {
+		fmt.Printf("failed to parse file=%s with error %v", fileName, parseErr)
+		os.Exit(1)
+	}
+
+	allErrors := linter.LintErrorsWithMetadata{}
+	for _, rule := range rules {
+		errorsFromLintRule := rule(schema)
+		allErrors = append(allErrors, errorsFromLintRule...)
+ } + + sortedErrors := allErrors.GetSortedErrors() + inlineLintConfigs := extractInlineLintConfiguration(source) + filteredErrors := filterErrors(sortedErrors, inlineLintConfigs) + return filteredErrors +} + +func filterErrors(errors []linter.LintErrorWithMetadata, configs []InlineLintConfig) []linter.LintErrorWithMetadata { + filteredErrors := make([]linter.LintErrorWithMetadata, 0) + for _, lintErr := range errors { + shouldApplyRule := true + errorLine := lintErr.Line + for _, config := range configs { + // If the error for the lintRule isn't one of the specified rule then there's nothing to do for it + if !contains(config.rules, lintErr.Rule) { + continue + } + if config.command == "lint-disable-line" && config.pos == errorLine { + shouldApplyRule = false + break + } + if config.pos < errorLine { + if config.command == "lint-enable" { + shouldApplyRule = true + } else if config.command == "lint-disable" { + shouldApplyRule = false + } + } + } + if shouldApplyRule { + filteredErrors = append(filteredErrors, lintErr) + } + } + + return filteredErrors +} + +func contains(list []string, first linter.LintRule) bool { + for _, second := range list { + if strings.EqualFold(string(first), second) { + return true + } + } + return false +} diff --git a/cmd/root.go b/cmd/root.go new file mode 100644 index 0000000..f349601 --- /dev/null +++ b/cmd/root.go @@ -0,0 +1,33 @@ +package cmd + +import ( + "fmt" + "os" + + "github.com/CrowdStrike/gql/cmd/compare" + "github.com/CrowdStrike/gql/cmd/linter" + + "github.com/spf13/cobra" +) + +func newRootCmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "gql", + Short: "gql is a CLI built for federated GraphQL services' schemas", + Long: `gql is a CLI built for federated GraphQL services' schemas`, + CompletionOptions: cobra.CompletionOptions{ + HiddenDefaultCmd: true, + }, + } + cmd.AddCommand(linter.NewLintCmd()) + cmd.AddCommand(compare.NewCompareCmd()) + return cmd +} + +// Execute is a wrapper to execute Run function in subcommands +func Execute() { + if err := newRootCmd().Execute(); err != nil { + fmt.Println(err) + os.Exit(1) + } +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..1e87aa3 --- /dev/null +++ b/go.mod @@ -0,0 +1,13 @@ +module github.com/CrowdStrike/gql + +go 1.17 + +require ( + github.com/spf13/cobra v1.4.0 + github.com/vektah/gqlparser/v2 v2.5.0 +) + +require ( + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/spf13/pflag v1.0.5 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..37fd61f --- /dev/null +++ b/go.sum @@ -0,0 +1,33 @@ +github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883 h1:bvNMNQO63//z+xNgfBlViaCIJKLlCJ6/fmUseuG0wVQ= +github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= +github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/kr/pretty v0.1.0/go.mod 
h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= +github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q= +github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/vektah/gqlparser/v2 v2.5.0 h1:GwEwy7AJsqPWrey0bHnn+3JLaHLZVT66wY/+O+Tf9SU= +github.com/vektah/gqlparser/v2 v2.5.0/go.mod h1:mPgqFBu/woKTVYWyNk8cO3kh4S/f4aRFZrvOnp3hmCs= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/gql.go b/gql.go new file mode 100644 index 0000000..4ec71fa --- /dev/null +++ b/gql.go @@ -0,0 +1,7 @@ +package main + +import "github.com/CrowdStrike/gql/cmd" + +func main() { + cmd.Execute() +} diff --git a/pkg/compare/compare.go b/pkg/compare/compare.go new file mode 100644 index 0000000..06ae02e --- /dev/null +++ b/pkg/compare/compare.go @@ -0,0 +1,1302 @@ +package compare + +import ( + "fmt" + "sort" + + "github.com/vektah/gqlparser/v2/ast" +) + +//const ( +// breakingIcon, dangerousIcon, nonbreakingIcon = ":x:", ":warning:", ":white_check_mark:" +//) + +// ChangeType enum to list all type of breaking/non-breaking/dangerous changes +type ChangeType string + +const ( + // FieldArgumentDescriptionChanged Field Argument Description Changed + FieldArgumentDescriptionChanged ChangeType = "FIELD_ARGUMENT_DESCRIPTION_CHANGED" + // FieldArgumentDefaultChanged Field Argument Default Changed + FieldArgumentDefaultChanged ChangeType = "FIELD_ARGUMENT_DEFAULT_CHANGED" + // FieldArgumentTypeChanged Field Argument Type Changed + FieldArgumentTypeChanged ChangeType = "FIELD_ARGUMENT_TYPE_CHANGED" + // DirectiveRemoved Directive Removed + DirectiveRemoved ChangeType = "DIRECTIVE_REMOVED" + // DirectiveChanged Directive changed + DirectiveChanged ChangeType = "DIRECTIVE_CHANGED" + // DirectiveAdded Directive Added + DirectiveAdded ChangeType = "DIRECTIVE_ADDED" + // DirectiveDescriptionChanged Directive Description Changed + DirectiveDescriptionChanged ChangeType = 
"DIRECTIVE_DESCRIPTION_CHANGED" + // DirectiveLocationAdded Directive Location Added + DirectiveLocationAdded ChangeType = "DIRECTIVE_LOCATION_ADDED" + // DirectiveLocationRemoved Directive Location Removed + DirectiveLocationRemoved ChangeType = "DIRECTIVE_LOCATION_REMOVED" + // DirectiveArgumentAdded Directive Argument Added + DirectiveArgumentAdded ChangeType = "DIRECTIVE_ARGUMENT_ADDED" + // DirectiveArgumentRemoved Directive Argument Removed + DirectiveArgumentRemoved ChangeType = "DIRECTIVE_ARGUMENT_REMOVED" + // DirectiveArgumentDescriptionChanged Directive Argument Description Changed + DirectiveArgumentDescriptionChanged ChangeType = "DIRECTIVE_ARGUMENT_DESCRIPTION_CHANGED" + // DirectiveArgumentDefaultValueChanged Directive Argument Default Value Changed + DirectiveArgumentDefaultValueChanged ChangeType = "DIRECTIVE_ARGUMENT_DEFAULT_VALUE_CHANGED" + // DirectiveArgumentTypeChanged Directive Argument Type Changed + DirectiveArgumentTypeChanged ChangeType = "DIRECTIVE_ARGUMENT_TYPE_CHANGED" + // DirectiveRepeatableRemoved Directive Repeatable Removed + DirectiveRepeatableRemoved ChangeType = "DIRECTIVE_REPEATABLE_REMOVED" + // DirectiveRepeatableAdded Directive Repeatable Added + DirectiveRepeatableAdded ChangeType = "DIRECTIVE_REPEATABLE_ADDED" + // DirectiveArgumentValueChanged Directive Argument Value Changed + DirectiveArgumentValueChanged ChangeType = "DIRECTIVE_ARGUMENT_VALUE_CHANGED" + // EnumValueRemoved Enum Value Removed + EnumValueRemoved ChangeType = "ENUM_VALUE_REMOVED" + // EnumValueAdded Enum Value Added + EnumValueAdded ChangeType = "ENUM_VALUE_ADDED" + // EnumValueDescriptionChanged Enum Value Description Changed + EnumValueDescriptionChanged ChangeType = "ENUM_VALUE_DESCRIPTION_CHANGED" + // EnumValueDeprecationReasonChanged Enum Value Deprecation Reason Changed + EnumValueDeprecationReasonChanged ChangeType = "ENUM_VALUE_DEPRECATION_REASON_CHANGED" + // EnumValueDeprecationAdded Enum Value Deprecation Added + EnumValueDeprecationAdded ChangeType = "ENUM_VALUE_DEPRECATION_ADDED" + // FieldRemoved Field Removed + FieldRemoved ChangeType = "FIELD_REMOVED" + // FieldAdded Field Added + FieldAdded ChangeType = "FIELD_ADDED" + // FieldDescriptionChanged Field Description Changed + FieldDescriptionChanged ChangeType = "FIELD_DESCRIPTION_CHANGED" + // FieldDeprecationAdded Field Deprecation Added + FieldDeprecationAdded ChangeType = "FIELD_DEPRECATION_ADDED" + // FieldDeprecationRemoved Field Deprecation Removed + FieldDeprecationRemoved ChangeType = "FIELD_DEPRECATION_REMOVED" + // FieldDeprecationReasonChanged Field Deprecation Reason Changed + FieldDeprecationReasonChanged ChangeType = "FIELD_DEPRECATION_REASON_CHANGED" + // FieldTypeChanged Field Type Changed + FieldTypeChanged ChangeType = "FIELD_TYPE_CHANGED" + // FieldArgumentAdded Field Argument Added + FieldArgumentAdded ChangeType = "FIELD_ARGUMENT_ADDED" + // FieldArgumentRemoved Field Argument Removed + FieldArgumentRemoved ChangeType = "FIELD_ARGUMENT_REMOVED" + // InputFieldRemoved Input Field Removed + InputFieldRemoved ChangeType = "INPUT_FIELD_REMOVED" + // InputFieldAdded Input Field Added + InputFieldAdded ChangeType = "INPUT_FIELD_ADDED" + // InputFieldDescriptionChanged Input Field Description Changed + InputFieldDescriptionChanged ChangeType = "INPUT_FIELD_DESCRIPTION_CHANGED" + // InputFieldDefaultValueChanged Input Field Default Value Changed + InputFieldDefaultValueChanged ChangeType = "INPUT_FIELD_DEFAULT_VALUE_CHANGED" + // InputFieldTypeChanged Input Field Type Changed + InputFieldTypeChanged 
ChangeType = "INPUT_FIELD_TYPE_CHANGED" + // ObjectTypeInterfaceAdded Object Type Interface Added + ObjectTypeInterfaceAdded ChangeType = "OBJECT_TYPE_INTERFACE_ADDED" + // ObjectTypeInterfaceRemoved Object Type Interface Removed + ObjectTypeInterfaceRemoved ChangeType = "OBJECT_TYPE_INTERFACE_REMOVED" + // SchemaQueryTypeChanged Schema Query Type Changed + SchemaQueryTypeChanged ChangeType = "SCHEMA_QUERY_TYPE_CHANGED" + // SchemaMutationTypeChanged Schema Mutation Type Changed + SchemaMutationTypeChanged ChangeType = "SCHEMA_MUTATION_TYPE_CHANGED" + // SchemaSubscriptionTypeChanged Schema Subscription Type Changed + SchemaSubscriptionTypeChanged ChangeType = "SCHEMA_SUBSCRIPTION_TYPE_CHANGED" + // TypeRemoved Type Removed + TypeRemoved ChangeType = "TYPE_REMOVED" + // TypeAdded Type Added + TypeAdded ChangeType = "TYPE_ADDED" + // TypeKindChanged Type Kind Changed + TypeKindChanged ChangeType = "TYPE_KIND_CHANGED" + // TypeDescriptionChanged Type Description Changed + TypeDescriptionChanged ChangeType = "TYPE_DESCRIPTION_CHANGED" + // UnionMemberRemoved Union Member Removed + UnionMemberRemoved ChangeType = "UNION_MEMBER_REMOVED" + // UnionMemberAdded Union Member Added + UnionMemberAdded ChangeType = "UNION_MEMBER_ADDED" +) + +// Criticality severity of a change in schema +type Criticality int + +const ( + // NonBreaking Change is compatible with previous version + NonBreaking Criticality = 0 + // Dangerous Change is compatible with previous version but can result in unexpected behavior for consumer + Dangerous Criticality = 1 + // Breaking Change is incompatible with previous version + Breaking Criticality = 2 +) + +const deprecatedDirective = "deprecated" + +// Change defines a change in schema +type Change struct { + message string + changeType ChangeType + criticalityLevel Criticality + path string + position *ast.Position +} + +// FindChangesInSchemas compares two schemas, returns the list of all changes made in the second schema +func FindChangesInSchemas(oldSchema *ast.SchemaDocument, newSchema *ast.SchemaDocument) []*Change { + var changes []*Change + changes = []*Change{} + changes = append(changes, changeInSchema(oldSchema.Schema, newSchema.Schema)...) + changes = append(changes, changeInSchema(oldSchema.SchemaExtension, newSchema.SchemaExtension)...) + changes = append(changes, changeInTypes(oldSchema, newSchema)...) + changes = append(changes, changeInDirective(oldSchema.Directives, newSchema.Directives)...) + return changes +} + +// changeInSchema change in schema root operations +func changeInSchema(oldSchemaDefs ast.SchemaDefinitionList, newSchemaDefs ast.SchemaDefinitionList) []*Change { + var changes []*Change + if len(oldSchemaDefs) == 0 && len(newSchemaDefs) == 0 { + return changes + } + if len(oldSchemaDefs) > 0 && len(newSchemaDefs) == 0 { + oldQuery := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Query) + changes = append(changes, changesInSchemaOperation(oldQuery, nil, ast.Query)...) + + oldMutation := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Mutation) + changes = append(changes, changesInSchemaOperation(oldMutation, nil, ast.Mutation)...) + + oldSubscription := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Subscription) + changes = append(changes, changesInSchemaOperation(oldSubscription, nil, ast.Subscription)...) 
+		return changes
+	}
+	if len(oldSchemaDefs) == 0 && len(newSchemaDefs) > 0 {
+		newQuery := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Query)
+		changes = append(changes, changesInSchemaOperation(nil, newQuery, ast.Query)...)
+
+		newMutation := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Mutation)
+		changes = append(changes, changesInSchemaOperation(nil, newMutation, ast.Mutation)...)
+
+		newSubscription := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Subscription)
+		changes = append(changes, changesInSchemaOperation(nil, newSubscription, ast.Subscription)...)
+		return changes
+	}
+
+	oldQuery := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Query)
+	newQuery := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Query)
+	changes = append(changes, changesInSchemaOperation(oldQuery, newQuery, ast.Query)...)
+
+	oldMutation := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Mutation)
+	newMutation := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Mutation)
+	changes = append(changes, changesInSchemaOperation(oldMutation, newMutation, ast.Mutation)...)
+
+	oldSubscription := getOperationForName(oldSchemaDefs[0].OperationTypes, ast.Subscription)
+	newSubscription := getOperationForName(newSchemaDefs[0].OperationTypes, ast.Subscription)
+	changes = append(changes, changesInSchemaOperation(oldSubscription, newSubscription, ast.Subscription)...)
+
+	return changes
+}
+
+func changesInSchemaOperation(oldOp *ast.OperationTypeDefinition, newOp *ast.OperationTypeDefinition, op ast.Operation) []*Change {
+	var changes []*Change
+	var changeType ChangeType
+	switch op {
+	case ast.Query:
+		changeType = SchemaQueryTypeChanged
+	case ast.Mutation:
+		changeType = SchemaMutationTypeChanged
+	case ast.Subscription:
+		changeType = SchemaSubscriptionTypeChanged
+	}
+	if oldOp == nil && newOp != nil {
+		changes = append(changes, &Change{
+			changeType:       changeType,
+			criticalityLevel: NonBreaking,
+			message:          fmt.Sprintf("Schema %s root has added '%s'", op, newOp.Type),
+			position:         newOp.Position,
+		})
+	}
+	if oldOp != nil && newOp == nil {
+		changes = append(changes, &Change{
+			changeType:       changeType,
+			criticalityLevel: Breaking,
+			message:          fmt.Sprintf("Schema %s root has removed '%s'", op, oldOp.Type),
+			position:         oldOp.Position,
+		})
+	}
+	if oldOp != nil && newOp != nil && oldOp.Type != newOp.Type {
+		changes = append(changes, &Change{
+			changeType:       changeType,
+			criticalityLevel: Breaking,
+			message:          fmt.Sprintf("Schema %s root has changed from '%s' to '%s'", op, oldOp.Type, newOp.Type),
+			position:         newOp.Position,
+		})
+	}
+	return changes
+}
+
+// changeInTypes change in all the types
+func changeInTypes(oldSchema *ast.SchemaDocument, newSchema *ast.SchemaDocument) []*Change {
+	var changes []*Change
+	persistedType := map[string][]*ast.Definition{}
+	//Check if types added/removed/persisted
+	changes = append(changes, checkTypeRemoved(oldSchema.Definitions, newSchema.Definitions, persistedType, false)...)
+	changes = append(changes, checkTypeAdded(oldSchema.Definitions, newSchema.Definitions, false)...)
+
+	//Check if extended types added/removed/persisted
+	changes = append(changes, checkTypeRemoved(oldSchema.Extensions, newSchema.Extensions, persistedType, true)...)
+	changes = append(changes, checkTypeAdded(oldSchema.Extensions, newSchema.Extensions, true)...)
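+	//At this point persistedType maps every type name present in both schemas to
+	//its [old schema, new schema] definition pair, populated by the
+	//checkTypeRemoved calls above.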
+ + //compare persisted types for changes in fields/directives + for _, defs := range persistedType { + ot := defs[0] + nt := defs[1] + if ot.Kind == ast.Enum && nt.Kind == ast.Enum { + changes = append(changes, changeInEnum(ot, nt)...) + } + if ot.Kind == ast.InputObject && nt.Kind == ast.InputObject { + changes = append(changes, changeInInputFields(ot, nt)...) + } + if ot.Kind == ast.Interface && nt.Kind == ast.Interface { + changes = append(changes, changeInTypeFieldDirectives(ot.Directives, nt.Directives, nt.Name, nt.Position)...) + changes = append(changes, changeInFields(ot, nt)...) + } + if ot.Kind == ast.Object && nt.Kind == ast.Object { + changes = append(changes, changeInObject(ot, nt)...) + } + if ot.Kind == ast.Union && nt.Kind == ast.Union { + changes = append(changes, changeInUnion(ot, nt)...) + } + if ot.Kind != nt.Kind { + //Changing the kind of a type is a breaking change because it can cause existing queries to error. + //For example, turning an object type to a scalar type would break queries that define a selection set for this type. + changes = append(changes, &Change{ + changeType: TypeKindChanged, + criticalityLevel: Breaking, + message: fmt.Sprintf("Type '%s' kind changed from '%s' to '%s'", ot.Name, ot.Kind, nt.Kind), + path: nt.Name, + position: nt.Position, + }) + } + if ot.Description != nt.Description { + changes = append(changes, &Change{ + changeType: TypeDescriptionChanged, + criticalityLevel: NonBreaking, + message: fmt.Sprintf("Type '%s' description changed", ot.Name), + path: nt.Name, + position: nt.Position, + }) + } + } + return changes +} + +func checkTypeRemoved(oldSchemaDefs ast.DefinitionList, newSchemaDefs ast.DefinitionList, persistedType map[string][]*ast.Definition, isExtended bool) []*Change { + var changes []*Change + for _, ot := range oldSchemaDefs { + nt := newSchemaDefs.ForName(ot.Name) + if nt == nil { + msg := fmt.Sprintf("Type '%s' was removed", ot.Name) + if isExtended { + msg = fmt.Sprintf("Extended type '%s' was removed", ot.Name) + } + //removing a type from schema is a breaking change + changes = append(changes, &Change{ + changeType: TypeRemoved, + criticalityLevel: Breaking, + message: msg, + path: ot.Name, + position: ot.Position, + }) + } else { + persistedType[ot.Name] = []*ast.Definition{ot, nt} + } + } + return changes +} + +func checkTypeAdded(oldSchemaDefs ast.DefinitionList, newSchemaDefs ast.DefinitionList, isExtended bool) []*Change { + var changes []*Change + for _, nt := range newSchemaDefs { + ot := oldSchemaDefs.ForName(nt.Name) + if ot == nil { + msg := fmt.Sprintf("Type '%s' was added", nt.Name) + if isExtended { + msg = fmt.Sprintf("Extended type '%s' was added", nt.Name) + } + //type added to new schema + changes = append(changes, &Change{ + changeType: TypeAdded, + criticalityLevel: NonBreaking, + message: msg, + path: nt.Name, + position: nt.Position, + }) + } + } + return changes +} + +// changeInDirective change in the directive definitions +func changeInDirective(oDirs ast.DirectiveDefinitionList, nDirs ast.DirectiveDefinitionList) []*Change { + var changes []*Change + for _, od := range oDirs { + nd := nDirs.ForName(od.Name) + if nd == nil { + changes = append(changes, &Change{ + changeType: DirectiveRemoved, + criticalityLevel: Breaking, + message: fmt.Sprintf("Directive '@%s' was removed ", od.Name), + path: fmt.Sprintf("@%s", od.Name), + position: od.Position, + }) + } else { + //description changed + if od.Description != nd.Description { + changes = append(changes, &Change{ + changeType: 
DirectiveDescriptionChanged,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Directive '@%s' description changed ", od.Name),
+					path:             fmt.Sprintf("@%s", nd.Name),
+					position:         nd.Position,
+				})
+			}
+			changes = append(changes, checkDirectiveLocationChanged(od, nd)...)
+			changes = append(changes, checkDirectiveRepeatableChanged(od, nd)...)
+			//argument changed
+			changes = append(changes, changeInDirectiveArguments(od, nd)...)
+			changes = append(changes, checkDirectiveArgumentAdded(od, nd)...)
+		}
+	}
+	changes = append(changes, checkDirectiveAdded(oDirs, nDirs)...)
+	return changes
+}
+
+func checkDirectiveArgumentAdded(od *ast.DirectiveDefinition, nd *ast.DirectiveDefinition) []*Change {
+	var changes []*Change
+	for _, nArg := range nd.Arguments {
+		oArg := od.Arguments.ForName(nArg.Name)
+		if oArg == nil {
+			//argument added to the directive
+			//Adding non-nullable argument is a breaking change
+			if nArg.Type.NonNull {
+				changes = append(changes, &Change{
+					changeType:       DirectiveArgumentAdded,
+					criticalityLevel: Breaking,
+					message:          fmt.Sprintf("Non-nullable argument '%s:%s' was added to directive '@%s'", nArg.Name, nArg.Type.String(), nd.Name),
+					path:             fmt.Sprintf("@%s.%s", od.Name, nArg.Name),
+					position:         nArg.Position,
+				})
+			} else {
+				changes = append(changes, &Change{
+					changeType:       DirectiveArgumentAdded,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Argument '%s:%s' was added to directive '@%s'", nArg.Name, nArg.Type.String(), nd.Name),
+					path:             fmt.Sprintf("@%s.%s", od.Name, nArg.Name),
+					position:         nArg.Position,
+				})
+			}
+		}
+	}
+	return changes
+}
+
+func changeInDirectiveArguments(od *ast.DirectiveDefinition, nd *ast.DirectiveDefinition) []*Change {
+	var changes []*Change
+	for _, oArg := range od.Arguments {
+		nArg := nd.Arguments.ForName(oArg.Name)
+		if nArg == nil {
+			//argument is removed
+			changes = append(changes, &Change{
+				changeType:       DirectiveArgumentRemoved,
+				criticalityLevel: Breaking,
+				message:          fmt.Sprintf("Argument '%s' was removed from directive '@%s'", oArg.Name, od.Name),
+				path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+				position:         nd.Position,
+			})
+		} else {
+			//check argument type change
+			changes = append(changes, checkDirectiveArgumentTypeChanged(od, oArg, nArg)...)
+
+			//check description change
+			if oArg.Description != nArg.Description {
+				changes = append(changes, &Change{
+					changeType:       DirectiveArgumentDescriptionChanged,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Argument '%s' description changed in directive '@%s' ", oArg.Name, od.Name),
+					path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+					position:         nArg.Position,
+				})
+			}
+		}
+	}
+	return changes
+}
+
+func checkDirectiveArgumentTypeChanged(od *ast.DirectiveDefinition, oArg *ast.ArgumentDefinition, nArg *ast.ArgumentDefinition) []*Change {
+	var changes []*Change
+	if oArg.Type.String() != nArg.Type.String() {
+		//Changing an input field from non-null to null is considered non-breaking.
+		cl := NonBreaking
+		if !isSafeChangeForInputValue(oArg.Type, nArg.Type) {
+			cl = Breaking
+		}
+		changes = append(changes, &Change{
+			changeType:       DirectiveArgumentTypeChanged,
+			criticalityLevel: cl,
+			message:          fmt.Sprintf("Argument '%s' type changed from '%s' to '%s' in directive '@%s' ", oArg.Name, oArg.Type.String(), nArg.Type.String(), od.Name),
+			path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+			position:         nArg.Position,
+		})
+	}
+	//Changing the default value for an argument may change the runtime behaviour of a field if it was never provided.
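+	//For example (hypothetical directive), changing
+	//  directive @limit(max: Int = 10) on FIELD_DEFINITION
+	//to
+	//  directive @limit(max: Int = 25) on FIELD_DEFINITION
+	//alters behavior anywhere 'max' was never passed explicitly.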
+	if oArg.DefaultValue.String() != nArg.DefaultValue.String() {
+		changes = append(changes, &Change{
+			changeType:       DirectiveArgumentDefaultValueChanged,
+			criticalityLevel: Dangerous,
+			message:          fmt.Sprintf("Argument '%s' default value changed from '%s' to '%s' in directive '@%s' ", oArg.Name, oArg.DefaultValue.String(), nArg.DefaultValue.String(), od.Name),
+			path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+			position:         nArg.Position,
+		})
+	}
+	return changes
+}
+
+func checkDirectiveRepeatableChanged(od *ast.DirectiveDefinition, nd *ast.DirectiveDefinition) []*Change {
+	var changes []*Change
+	//isRepeatable removed
+	if od.IsRepeatable && !nd.IsRepeatable {
+		changes = append(changes, &Change{
+			changeType:       DirectiveRepeatableRemoved,
+			criticalityLevel: Breaking,
+			message:          fmt.Sprintf("Repeatable flag was removed from '@%s' directive", od.Name),
+			path:             fmt.Sprintf("@%s", nd.Name),
+			position:         nd.Position,
+		})
+	}
+	//isRepeatable added
+	if !od.IsRepeatable && nd.IsRepeatable {
+		changes = append(changes, &Change{
+			changeType:       DirectiveRepeatableAdded,
+			criticalityLevel: NonBreaking,
+			message:          fmt.Sprintf("Repeatable flag was added to '@%s' directive", nd.Name),
+			path:             fmt.Sprintf("@%s", nd.Name),
+			position:         nd.Position,
+		})
+	}
+	return changes
+}
+
+func checkDirectiveLocationChanged(od *ast.DirectiveDefinition, nd *ast.DirectiveDefinition) []*Change {
+	var changes []*Change
+	//location changed
+	found := false
+	for _, ol := range od.Locations {
+		found = false
+		for _, nl := range nd.Locations {
+			if ol == nl {
+				found = true
+			}
+		}
+		if !found {
+			changes = append(changes, &Change{
+				changeType:       DirectiveLocationRemoved,
+				criticalityLevel: Breaking,
+				message:          fmt.Sprintf("Location '%s' was removed from '@%s' directive", ol, od.Name),
+				path:             fmt.Sprintf("@%s", nd.Name),
+				position:         nd.Position,
+			})
+		}
+	}
+	for _, nl := range nd.Locations {
+		found = false
+		for _, ol := range od.Locations {
+			if nl == ol {
+				found = true
+			}
+		}
+		if !found {
+			changes = append(changes, &Change{
+				changeType:       DirectiveLocationAdded,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Location '%s' was added to '@%s' directive", nl, nd.Name),
+				path:             fmt.Sprintf("@%s", nd.Name),
+				position:         nd.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func checkDirectiveAdded(oDirs ast.DirectiveDefinitionList, nDirs ast.DirectiveDefinitionList) []*Change {
+	var changes []*Change
+	for _, nd := range nDirs {
+		od := oDirs.ForName(nd.Name)
+		if od == nil {
+			changes = append(changes, &Change{
+				changeType:       DirectiveAdded,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Directive '@%s' was added ", nd.Name),
+				path:             fmt.Sprintf("@%s", nd.Name),
+				position:         nd.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func changeInEnum(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	for _, ov := range oDef.EnumValues {
+		nv := nDef.EnumValues.ForName(ov.Name)
+		oDep := ov.Directives.ForName(deprecatedDirective)
+		if nv == nil {
+			msg := fmt.Sprintf("Enum value '%s' was removed from enum '%s'", ov.Name, oDef.Name)
+			if oDep != nil {
+				msg = fmt.Sprintf("Enum value '%s'(deprecated) was removed from enum '%s' ", ov.Name, oDef.Name)
+			}
+			//Removing an enum value will cause existing queries that use this enum value to error.
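+			//For example, removing GREEN from 'enum Color { RED GREEN }' breaks any
+			//stored query or variable payload that still sends GREEN.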
+			changes = append(changes, &Change{
+				changeType:       EnumValueRemoved,
+				criticalityLevel: Breaking,
+				message:          msg,
+				path:             fmt.Sprintf("%s.%s", oDef.Name, ov.Name),
+				position:         ov.Position,
+			})
+		} else {
+			if ov.Description != nv.Description {
+				changes = append(changes, &Change{
+					changeType:       EnumValueDescriptionChanged,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Enum value '%s' description changed in enum '%s' ", ov.Name, oDef.Name),
+					path:             fmt.Sprintf("%s.%s", oDef.Name, ov.Name),
+					position:         nv.Position,
+				})
+			}
+			changes = append(changes, checkEnumValueDeprecationChanged(oDef, nv, ov)...)
+		}
+	}
+	changes = append(changes, checkEnumValuesAdded(oDef, nDef)...)
+	return changes
+}
+
+func checkEnumValueDeprecationChanged(oDef *ast.Definition, nv *ast.EnumValueDefinition, ov *ast.EnumValueDefinition) []*Change {
+	var changes []*Change
+	oDep := ov.Directives.ForName(deprecatedDirective)
+	nDep := nv.Directives.ForName(deprecatedDirective)
+	if oDep == nil && nDep != nil {
+		changes = append(changes, &Change{
+			changeType:       EnumValueDeprecationAdded,
+			criticalityLevel: Dangerous,
+			message:          fmt.Sprintf("Enum value '%s' deprecated in enum '%s' ", ov.Name, oDef.Name),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, ov.Name),
+			position:         nv.Position,
+		})
+	}
+	if oDep != nil && nDep != nil {
+		//compare the 'reason' argument values; comparing the *ast.Argument pointers
+		//returned by ForName would flag a change whenever both sides are deprecated
+		oReason := oDep.Arguments.ForName("reason")
+		nReason := nDep.Arguments.ForName("reason")
+		if (oReason == nil) != (nReason == nil) ||
+			(oReason != nil && nReason != nil && oReason.Value.String() != nReason.Value.String()) {
+			changes = append(changes, &Change{
+				changeType:       EnumValueDeprecationReasonChanged,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Enum value '%s' deprecation reason changed in enum '%s' ", ov.Name, oDef.Name),
+				path:             fmt.Sprintf("%s.%s", oDef.Name, ov.Name),
+				position:         nv.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func checkEnumValuesAdded(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	for _, nv := range nDef.EnumValues {
+		ov := oDef.EnumValues.ForName(nv.Name)
+		if ov == nil {
+			//Adding an enum value may break existing clients that were not programming defensively against an added case when querying an enum.
+			changes = append(changes, &Change{
+				changeType:       EnumValueAdded,
+				criticalityLevel: Dangerous,
+				message:          fmt.Sprintf("Enum value '%s' was added to enum '%s'", nv.Name, nDef.Name),
+				path:             fmt.Sprintf("%s.%s", oDef.Name, nv.Name),
+				position:         nv.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func changeInObject(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	//check implementing interfaces
+	changes = append(changes, checkTypeInterfaceRemoved(oDef, nDef)...)
+	changes = append(changes, checkTypeInterfacesAdded(oDef, nDef)...)
+	changes = append(changes, changeInTypeFieldDirectives(oDef.Directives, nDef.Directives, nDef.Name, nDef.Position)...)
+	changes = append(changes, changeInFields(oDef, nDef)...)
+	return changes
+}
+
+func checkTypeInterfaceRemoved(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	found := false
+	for _, oInt := range oDef.Interfaces {
+		found = false
+		for _, nInt := range nDef.Interfaces {
+			if oInt == nInt {
+				found = true
+			}
+		}
+		if !found {
+			//Removing an interface from an object type can cause existing queries that use this in a fragment spread to error.
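+			//For example, a fragment spread '... on User { name }' against Employee
+			//stops validating once Employee no longer implements User.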
+ changes = append(changes, &Change{ + changeType: ObjectTypeInterfaceRemoved, + criticalityLevel: Breaking, + message: fmt.Sprintf("'%s' object type no longer implements '%s' interface", oDef.Name, oInt), + path: oDef.Name, + position: nDef.Position, + }) + } + } + return changes +} + +func checkTypeInterfacesAdded(oDef *ast.Definition, nDef *ast.Definition) []*Change { + var changes []*Change + found := false + for _, nInt := range nDef.Interfaces { + found = false + for _, oInt := range oDef.Interfaces { + if oInt == nInt { + found = true + } + } + if !found { + //Adding an interface to an object type may break existing clients that were not programming defensively against a new possible type. + changes = append(changes, &Change{ + changeType: ObjectTypeInterfaceAdded, + criticalityLevel: Dangerous, + message: fmt.Sprintf("'%s' object type implements '%s' interface", nDef.Name, nInt), + path: oDef.Name, + position: nDef.Position, + }) + } + } + return changes +} + +func changeInUnion(oDef *ast.Definition, nDef *ast.Definition) []*Change { + var changes []*Change + //Check if union types added/removed + changes = append(changes, checkUnionMemberRemoved(oDef, nDef)...) + changes = append(changes, checkUnionMemberAdded(oDef, nDef)...) + return changes +} + +func checkUnionMemberRemoved(oDef *ast.Definition, nDef *ast.Definition) []*Change { + var changes []*Change + found := false + for _, ot := range oDef.Types { + found = false + for _, nt := range nDef.Types { + if ot == nt { + found = true + } + } + if !found { + //Removing a union member from a union can cause existing queries that use this union member in a fragment spread to error. + changes = append(changes, &Change{ + changeType: UnionMemberRemoved, + criticalityLevel: Breaking, + message: fmt.Sprintf("Member '%s' was removed from Union type '%s'", ot, oDef.Name), + path: oDef.Name, + position: nDef.Position, + }) + } + } + return changes +} + +func checkUnionMemberAdded(oDef *ast.Definition, nDef *ast.Definition) []*Change { + var changes []*Change + found := false + for _, nt := range nDef.Types { + found = false + for _, ot := range oDef.Types { + if ot == nt { + found = true + } + } + if !found { + //Adding a possible type to Unions may break existing clients that were not programming defensively against a new possible type. + changes = append(changes, &Change{ + changeType: UnionMemberAdded, + criticalityLevel: Dangerous, + message: fmt.Sprintf("Member '%s' was added to Union type '%s'", nt, nDef.Name), + path: oDef.Name, + position: nDef.Position, + }) + } + } + return changes +} + +func changeInFields(oDef *ast.Definition, nDef *ast.Definition) []*Change { + var changes []*Change + for _, of := range oDef.Fields { + nf := nDef.Fields.ForName(of.Name) + oDep := of.Directives.ForName(deprecatedDirective) + if nf == nil { + //Removing a field is a breaking change. It is preferable to deprecate the field before removing it. + msg := fmt.Sprintf("Field '%s.%s' was removed from %s", oDef.Name, of.Name, oDef.Kind) + if oDep != nil { + //Removing a deprecated field is a breaking change. + //Before removing it, you may want to look at the field's usage to see the impact of removing the field. 
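+				//Deprecation only signals intent; clients may still be querying the
+				//field, so its removal is reported as breaking all the same.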
+				msg = fmt.Sprintf("Field '%s.%s'(deprecated) was removed from %s", oDef.Name, of.Name, oDef.Kind)
+			}
+			changes = append(changes, &Change{
+				changeType:       FieldRemoved,
+				criticalityLevel: Breaking,
+				message:          msg,
+				path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+				position:         nDef.Position,
+			})
+		} else {
+			//Check field type
+			changes = append(changes, checkFieldTypeChanged(oDef, of, nf)...)
+			//Check description change
+			if of.Description != nf.Description {
+				changes = append(changes, &Change{
+					changeType:       FieldDescriptionChanged,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Field '%s.%s' description changed in %s", oDef.Name, of.Name, oDef.Kind),
+					path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+					position:         nf.Position,
+				})
+			}
+			//Check deprecation changes
+			changes = append(changes, checkFieldDeprecationChanged(oDef, nf, of)...)
+			//check argument changes
+			changes = append(changes, changeInArgument(of, nf, oDef.Name)...)
+			changes = append(changes, changeInTypeFieldDirectives(of.Directives, nf.Directives, fmt.Sprintf("%s.%s", nDef.Name, nf.Name), nf.Position)...)
+		}
+	}
+	changes = append(changes, checkFieldsAdded(oDef, nDef)...)
+	return changes
+}
+
+func checkFieldTypeChanged(oDef *ast.Definition, of *ast.FieldDefinition, nf *ast.FieldDefinition) []*Change {
+	var changes []*Change
+	if of.Type.String() != nf.Type.String() {
+		cl := NonBreaking
+		if !isSafeChangeForFieldType(of.Type, nf.Type) {
+			cl = Breaking
+		}
+		changes = append(changes, &Change{
+			changeType:       FieldTypeChanged,
+			criticalityLevel: cl,
+			message:          fmt.Sprintf("Field '%s.%s' type changed from '%s' to '%s' in %s ", oDef.Name, of.Name, of.Type.String(), nf.Type.String(), oDef.Kind),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+			position:         nf.Position,
+		})
+	}
+	return changes
+}
+
+func checkFieldDeprecationChanged(oDef *ast.Definition, nf *ast.FieldDefinition, of *ast.FieldDefinition) []*Change {
+	var changes []*Change
+	oDep := of.Directives.ForName(deprecatedDirective)
+	nDep := nf.Directives.ForName(deprecatedDirective)
+	if oDep == nil && nDep != nil {
+		changes = append(changes, &Change{
+			changeType:       FieldDeprecationAdded,
+			criticalityLevel: Dangerous,
+			message:          fmt.Sprintf("Field '%s.%s' deprecated in %s ", oDef.Name, of.Name, oDef.Kind),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+			position:         nf.Position,
+		})
+	}
+	if oDep != nil && nDep == nil {
+		changes = append(changes, &Change{
+			changeType:       FieldDeprecationRemoved,
+			criticalityLevel: Dangerous,
+			message:          fmt.Sprintf("Field '%s.%s' deprecation removed in %s ", oDef.Name, of.Name, oDef.Kind),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+			position:         nf.Position,
+		})
+	}
+	if oDep != nil && nDep != nil {
+		//compare the 'reason' argument values; comparing the *ast.Argument pointers
+		//returned by ForName would flag a change whenever both sides are deprecated
+		oReason := oDep.Arguments.ForName("reason")
+		nReason := nDep.Arguments.ForName("reason")
+		if (oReason == nil) != (nReason == nil) ||
+			(oReason != nil && nReason != nil && oReason.Value.String() != nReason.Value.String()) {
+			changes = append(changes, &Change{
+				changeType:       FieldDeprecationReasonChanged,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Field '%s.%s' deprecation reason changed in %s ", oDef.Name, of.Name, oDef.Kind),
+				path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+				position:         nf.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func checkFieldsAdded(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	for _, nf := range nDef.Fields {
+		if oDef.Fields.ForName(nf.Name) == nil {
+			//Field added to the type
+			changes = append(changes, &Change{
+				changeType:       FieldAdded,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Field '%s.%s' was added to %s", nDef.Name, nf.Name, nDef.Kind),
+				path:             fmt.Sprintf("%s.%s",
oDef.Name, nf.Name), + position: nf.Position, + }) + } + } + return changes +} + +func changeInArgument(oDef *ast.FieldDefinition, nDef *ast.FieldDefinition, typeName string) []*Change { + var changes []*Change + for _, oArg := range oDef.Arguments { + nArg := nDef.Arguments.ForName(oArg.Name) + if nArg == nil { + //Removing a field argument is a breaking change because it will cause existing queries that use this argument to error. + changes = append(changes, &Change{ + changeType: FieldArgumentRemoved, + criticalityLevel: Breaking, + message: fmt.Sprintf("Argument '%s:%s' was removed from field '%s.%s'", oArg.Name, oArg.Type.String(), typeName, nDef.Name), + path: fmt.Sprintf("%s.%s.%s", typeName, oDef.Name, oArg.Name), + position: nDef.Position, + }) + } else { + //check argument type change + changes = append(changes, checkFieldArgumentTypeChanged(oArg, nArg, typeName, oDef.Name)...) + //Changing the default value for an argument may change the runtime behaviour of a field if it was never provided. + if oArg.DefaultValue.String() != nArg.DefaultValue.String() { + changes = append(changes, &Change{ + changeType: FieldArgumentDefaultChanged, + criticalityLevel: Dangerous, + message: fmt.Sprintf("Argument '%s' default value changed from '%s' to '%s' in '%s.%s' ", oArg.Name, oArg.DefaultValue.String(), nArg.DefaultValue.String(), typeName, oDef.Name), + path: fmt.Sprintf("%s.%s.%s", typeName, oDef.Name, oArg.Name), + position: nArg.Position, + }) + } + + //check description change + if oArg.Description != nArg.Description { + changes = append(changes, &Change{ + changeType: FieldArgumentDescriptionChanged, + criticalityLevel: NonBreaking, + message: fmt.Sprintf("Argument '%s' description changed in '%s.%s' ", oArg.Name, typeName, oDef.Name), + path: fmt.Sprintf("%s.%s.%s", typeName, oDef.Name, oArg.Name), + position: nArg.Position, + }) + } + } + } + changes = append(changes, checkFieldArgumentAdded(oDef, nDef, typeName)...) + return changes +} + +func checkFieldArgumentTypeChanged(oArg *ast.ArgumentDefinition, nArg *ast.ArgumentDefinition, typeName string, fieldName string) []*Change { + var changes []*Change + if oArg.Type.String() != nArg.Type.String() { + //Changing an input field from non-null to null is considered non-breaking. + cl := NonBreaking + if !isSafeChangeForInputValue(oArg.Type, nArg.Type) { + //Changing the type of a field's argument can cause existing queries that use this argument to error. + cl = Breaking + } + changes = append(changes, &Change{ + changeType: FieldArgumentTypeChanged, + criticalityLevel: cl, + message: fmt.Sprintf("Argument '%s' type changed from '%s' to '%s' in '%s.%s' ", oArg.Name, oArg.Type.String(), nArg.Type.String(), typeName, fieldName), + path: fmt.Sprintf("%s.%s.%s", typeName, fieldName, oArg.Name), + position: nArg.Position, + }) + } + return changes +} + +func checkFieldArgumentAdded(oDef *ast.FieldDefinition, nDef *ast.FieldDefinition, typeName string) []*Change { + var changes []*Change + for _, nArg := range nDef.Arguments { + oArg := oDef.Arguments.ForName(nArg.Name) + if oArg == nil { + //Adding a required argument to an existing field is a breaking change because it will cause existing uses of this field to error. 
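+			//For example, adding 'limit: Int!' to 'reviews(offset: Int)' invalidates
+			//an existing query '{ reviews(offset: 0) }' until callers supply 'limit'.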
+			if nArg.Type.NonNull {
+				changes = append(changes, &Change{
+					changeType:       FieldArgumentAdded,
+					criticalityLevel: Breaking,
+					message:          fmt.Sprintf("Required argument '%s:%s' was added to field '%s.%s'", nArg.Name, nArg.Type.String(), typeName, nDef.Name),
+					path:             fmt.Sprintf("%s.%s.%s", typeName, oDef.Name, nArg.Name),
+					position:         nArg.Position,
+				})
+			} else {
+				//Adding a new argument to an existing field may involve a change in resolve function logic that potentially may cause some side effects.
+				changes = append(changes, &Change{
+					changeType:       FieldArgumentAdded,
+					criticalityLevel: Dangerous,
+					message:          fmt.Sprintf("Argument '%s:%s' was added to field '%s.%s'", nArg.Name, nArg.Type.String(), typeName, nDef.Name),
+					path:             fmt.Sprintf("%s.%s.%s", typeName, oDef.Name, nArg.Name),
+					position:         nArg.Position,
+				})
+			}
+		}
+	}
+	return changes
+}
+
+func changeInInputFields(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	for _, of := range oDef.Fields {
+		nf := nDef.Fields.ForName(of.Name)
+		oDep := of.Directives.ForName(deprecatedDirective)
+		if nf == nil {
+			//Removing an input field will cause existing queries that use this input field to error.
+			msg := fmt.Sprintf("Input field '%s.%s' was removed from input object type", oDef.Name, of.Name)
+			if oDep != nil {
+				msg = fmt.Sprintf("Input field '%s.%s'(deprecated) was removed from input object type", oDef.Name, of.Name)
+			}
+			changes = append(changes, &Change{
+				changeType:       InputFieldRemoved,
+				criticalityLevel: Breaking,
+				message:          msg,
+				path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+				position:         nDef.Position,
+			})
+		} else {
+			//Check input field type
+			changes = append(changes, checkInputFieldTypeValueChanged(oDef, of, nf)...)
+			//Check description change
+			if of.Description != nf.Description {
+				changes = append(changes, &Change{
+					changeType:       InputFieldDescriptionChanged,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Input field '%s.%s' description changed in input object type", oDef.Name, of.Name),
+					path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+					position:         nf.Position,
+				})
+			}
+			//check change in field directives
+			changes = append(changes, changeInTypeFieldDirectives(of.Directives, nf.Directives, fmt.Sprintf("%s.%s", nDef.Name, nf.Name), nf.Position)...)
+		}
+	}
+	changes = append(changes, checkInputFieldsAdded(oDef, nDef)...)
+	return changes
+}
+
+func checkInputFieldTypeValueChanged(oDef *ast.Definition, of *ast.FieldDefinition, nf *ast.FieldDefinition) []*Change {
+	var changes []*Change
+	if of.Type.String() != nf.Type.String() {
+		//Changing an input field from non-null to null is considered non-breaking.
+		cl := NonBreaking
+		if !isSafeChangeForInputValue(of.Type, nf.Type) {
+			//Changing the type of an input field can cause existing queries that use this field to error.
+			cl = Breaking
+		}
+		changes = append(changes, &Change{
+			changeType:       InputFieldTypeChanged,
+			criticalityLevel: cl,
+			message:          fmt.Sprintf("Input field '%s.%s' type changed from '%s' to '%s' in input object type", oDef.Name, of.Name, of.Type.String(), nf.Type.String()),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+			position:         nf.Position,
+		})
+	}
+	//Changing the default value for an argument may change the runtime behaviour of a field if it was never provided.
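+	//For example, changing 'limit: Int = 10' to 'limit: Int = 25' in an input
+	//object silently changes results for every client that omitted the field.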
+	if of.DefaultValue.String() != nf.DefaultValue.String() {
+		changes = append(changes, &Change{
+			changeType:       InputFieldDefaultValueChanged,
+			criticalityLevel: Dangerous,
+			message:          fmt.Sprintf("Input field '%s.%s' default value changed from '%s' to '%s' in input object type", oDef.Name, of.Name, of.DefaultValue.String(), nf.DefaultValue.String()),
+			path:             fmt.Sprintf("%s.%s", oDef.Name, of.Name),
+			position:         nf.Position,
+		})
+	}
+	return changes
+}
+
+func checkInputFieldsAdded(oDef *ast.Definition, nDef *ast.Definition) []*Change {
+	var changes []*Change
+	for _, nf := range nDef.Fields {
+		if oDef.Fields.ForName(nf.Name) == nil {
+			//input field added to the type
+			if nf.Type.NonNull {
+				//Adding a required input field to an existing input object type is a breaking change because it will cause existing uses of this input object type to error.
+				changes = append(changes, &Change{
+					changeType:       InputFieldAdded,
+					criticalityLevel: Breaking,
+					message:          fmt.Sprintf("Required field '%s' was added to input object type '%s'", nf.Name, nDef.Name),
+					path:             fmt.Sprintf("%s.%s", oDef.Name, nf.Name),
+					position:         nf.Position,
+				})
+			} else {
+				changes = append(changes, &Change{
+					changeType:       InputFieldAdded,
+					criticalityLevel: Dangerous,
+					message:          fmt.Sprintf("Field '%s' was added to input object type '%s'", nf.Name, nDef.Name),
+					path:             fmt.Sprintf("%s.%s", oDef.Name, nf.Name),
+					position:         nf.Position,
+				})
+			}
+		}
+	}
+	return changes
+}
+
+func changeInTypeFieldDirectives(oDirs ast.DirectiveList, nDirs ast.DirectiveList, typeName string, pos *ast.Position) []*Change {
+	var changes []*Change
+	for _, od := range oDirs {
+		oDirList := oDirs.ForNames(od.Name) //if the directive is repetitive
+		if od.Name != deprecatedDirective {
+			nDirList := nDirs.ForNames(od.Name)
+			switch {
+			case len(nDirList) == 0:
+				changes = append(changes, &Change{
+					changeType:       DirectiveRemoved,
+					criticalityLevel: Dangerous,
+					message:          fmt.Sprintf("Directive '@%s' was removed from '%s'", od.Name, typeName),
+					path:             typeName,
+					position:         pos,
+				})
+			case len(nDirList) == 1 && len(oDirList) == 1:
+				//means there is only one directive, check for the argument changes
+				nd := nDirList[0]
+				changes = append(changes, checkFieldDirectiveArgumentChanged(od, nd, typeName, pos)...)
+				changes = append(changes, checkFieldDirectiveArgumentAdded(od, nd, typeName)...)
+			default:
+				//check if at least one directive from the list matches all arguments
+				haveSameArgVals := false
+				for _, nd := range nDirList {
+					if len(od.Arguments) == len(nd.Arguments) {
+						//vacuously true when there are no arguments; disproved by the first mismatch
+						haveSameArgVals = true
+						for _, oArg := range od.Arguments {
+							nArg := nd.Arguments.ForName(oArg.Name)
+							if nArg == nil || oArg.Value.String() != nArg.Value.String() {
+								//first argument mismatch, so no need to check the remaining args
+								haveSameArgVals = false
+								break
+							}
+						}
+						if haveSameArgVals {
+							//this directive has all the arguments matching, so no need to check the others in the list
+							break
+						}
+					}
+				}
+				if !haveSameArgVals {
+					changes = append(changes, &Change{
+						changeType:       DirectiveChanged,
+						criticalityLevel: Dangerous,
+						message:          fmt.Sprintf("Directive '@%s' was changed on '%s'", od.Name, typeName),
+						path:             typeName,
+						position:         pos,
+					})
+				}
+			}
+		}
+	}
+	changes = append(changes, checkFieldDirectiveAdded(oDirs, nDirs, typeName)...)
+	return changes
+}
+
+func checkFieldDirectiveArgumentAdded(od *ast.Directive, nd *ast.Directive, typeName string) []*Change {
+	var changes []*Change
+	for _, nArg := range nd.Arguments {
+		oArg := od.Arguments.ForName(nArg.Name)
+		if oArg == nil {
+			//argument added to the directive
+			changes = append(changes, &Change{
+				changeType:       DirectiveArgumentAdded,
+				criticalityLevel: NonBreaking,
+				message:          fmt.Sprintf("Directive '@%s' argument '%s' was added in '%s'", nd.Name, nArg.Name, typeName),
+				path:             fmt.Sprintf("%s.@%s", typeName, od.Name),
+				position:         nArg.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func checkFieldDirectiveArgumentChanged(od *ast.Directive, nd *ast.Directive, typeName string, pos *ast.Position) []*Change {
+	var changes []*Change
+	for _, oArg := range od.Arguments {
+		nArg := nd.Arguments.ForName(oArg.Name)
+		if nArg == nil {
+			//argument is removed
+			changes = append(changes, &Change{
+				changeType:       DirectiveArgumentRemoved,
+				criticalityLevel: Dangerous,
+				message:          fmt.Sprintf("Directive '@%s' argument '%s' was removed in '%s'", od.Name, oArg.Name, typeName),
+				path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+				position:         pos,
+			})
+		} else if oArg.Value.String() != nArg.Value.String() {
+			changes = append(changes, &Change{
+				changeType:       DirectiveArgumentValueChanged,
+				criticalityLevel: Dangerous,
+				message:          fmt.Sprintf("Directive '@%s' argument '%s' value changed from '%s' to '%s' in '%s' ", od.Name, oArg.Name, oArg.Value.String(), nArg.Value.String(), typeName),
+				path:             fmt.Sprintf("@%s.%s", od.Name, oArg.Name),
+				position:         nArg.Position,
+			})
+		}
+	}
+	return changes
+}
+
+func checkFieldDirectiveAdded(oDirs ast.DirectiveList, nDirs ast.DirectiveList, typeName string) []*Change {
+	var changes []*Change
+	for _, nd := range nDirs {
+		if nd.Name != deprecatedDirective {
+			od := oDirs.ForName(nd.Name)
+			if od == nil {
+				changes = append(changes, &Change{
+					changeType:       DirectiveAdded,
+					criticalityLevel: NonBreaking,
+					message:          fmt.Sprintf("Directive '@%s' was added in '%s'", nd.Name, typeName),
+					path:             typeName,
+					position:         nd.Position,
+				})
+			}
+		}
+	}
+	return changes
+}
+
+func isSafeChangeForFieldType(otyp *ast.Type, ntyp *ast.Type) bool {
+	if !isWrappingType(otyp) && !isWrappingType(ntyp) {
+		//if they're both named types, see if their names are equivalent
+		return otyp.String() == ntyp.String()
+	}
+	if !ntyp.NonNull && otyp.NonNull {
+		return false
+	}
+	if ntyp.NonNull {
+		if isListType(ntyp) {
+			//if they're both lists, make sure underlying types are compatible
+			return isListType(otyp) && isSafeChangeForFieldType(otyp.Elem, ntyp.Elem)
+		}
+		//moving from nullable to non-nullable is safe change
+		return otyp.NamedType == ntyp.NamedType
+	}
+	if isListType(otyp) {
+		//if they're both lists, make sure underlying types are compatible
+		return isListType(ntyp) && isSafeChangeForFieldType(otyp.Elem, ntyp.Elem)
+	}
+	return false
+}
+
+func isSafeChangeForInputValue(otyp *ast.Type, ntyp *ast.Type) bool {
+	if !isWrappingType(otyp) && !isWrappingType(ntyp) {
+		// if they're both named types, see if their names are equivalent
+		return otyp.String() == ntyp.String()
+	}
+	if !otyp.NonNull && ntyp.NonNull {
+		return false
+	}
+	if otyp.NonNull {
+		if isListType(otyp) {
+			//if they're both lists, make sure underlying types are compatible
+			return isListType(ntyp) && isSafeChangeForInputValue(otyp.Elem, ntyp.Elem)
+		}
+		//moving from non-nullable to nullable is safe change
+		return otyp.NamedType == ntyp.NamedType
+	}
+	// if they're both lists, make sure underlying types are compatible
+	if isListType(otyp) && isListType(ntyp) {
+		return isSafeChangeForInputValue(otyp.Elem, ntyp.Elem)
+	}
+	return false
+}
+
+//isListType checks if a type is a list
+func isListType(typ *ast.Type) bool {
+	return typ != nil && typ.Elem != nil && typ.NamedType == ""
+}
+
+//isNonNullType checks if a type is non-null
+func isNonNullType(typ *ast.Type) bool {
+	return typ != nil && typ.NonNull
+}
+
+func isWrappingType(typ *ast.Type) bool {
+	return isListType(typ) || isNonNullType(typ)
+}
+
+func getOperationForName(ops ast.OperationTypeDefinitionList, name ast.Operation) *ast.OperationTypeDefinition {
+	for _, op := range ops {
+		if op.Operation == name {
+			return op
+		}
+	}
+	return nil
+}
+
+// GroupChanges group all changes on their criticality level
+func GroupChanges(changes []*Change) map[Criticality][]*Change {
+	groupChanges := map[Criticality][]*Change{}
+	for _, c := range changes {
+		if _, ok := groupChanges[c.criticalityLevel]; !ok {
+			groupChanges[c.criticalityLevel] = []*Change{}
+		}
+		groupChanges[c.criticalityLevel] = append(groupChanges[c.criticalityLevel], c)
+	}
+	return groupChanges
+}
+
+// ReportBreakingChanges print only breaking changes in output
+func ReportBreakingChanges(changes []*Change, withFilepath bool) int {
+	if len(changes) == 0 {
+		return 0
+	}
+	sort.Slice(changes, less(changes))
+	for _, c := range changes {
+		if pos := getPosition(c); withFilepath && len(pos) > 0 {
+			fmt.Printf("%s %s %s\n", "❌", pos, c.message)
+			continue
+		}
+		fmt.Printf("%s %s\n", "❌", c.message)
+	}
+	return len(changes)
+}
+
+// ReportDangerousChanges print only dangerous changes in output
+func ReportDangerousChanges(changes []*Change, withFilepath bool) int {
+	if len(changes) == 0 {
+		return 0
+	}
+	sort.Slice(changes, less(changes))
+	for _, c := range changes {
+		if pos := getPosition(c); withFilepath && len(pos) > 0 {
+			fmt.Printf("%s %s %s\n", "✋️", pos, c.message)
+			continue
+		}
+		fmt.Printf("%s %s\n", "✋️", c.message)
+	}
+	return len(changes)
+}
+
+// ReportNonBreakingChanges print only non-breaking changes in output
+func ReportNonBreakingChanges(changes []*Change, withFilepath bool) int {
+	if len(changes) == 0 {
+		return 0
+	}
+	sort.Slice(changes, less(changes))
+	for _, c := range changes {
+		if pos := getPosition(c); withFilepath && len(pos) > 0 {
+			fmt.Printf("%s %s %s\n", "✅", pos, c.message)
+			continue
+		}
+		fmt.Printf("%s %s\n", "✅", c.message)
+	}
+	return len(changes)
+}
+
+func less(changes []*Change) func(i int, j int) bool {
+	return func(i, j int) bool {
+		if changes[i].position.Src.Name != changes[j].position.Src.Name {
+			return changes[i].position.Src.Name < changes[j].position.Src.Name
+		}
+		return changes[i].position.Line < changes[j].position.Line
+	}
+}
+
+func getPosition(c *Change) string {
+	position := ""
+	if c.position != nil {
+		fileName := ""
+		if c.position.Src.Name != "" {
+			fileName = c.position.Src.Name
+		}
+		position = fmt.Sprintf("%s:%d", fileName, c.position.Line)
+	}
+	return position
+}
diff --git a/pkg/compare/compare_test.go b/pkg/compare/compare_test.go
new file mode 100644
index 0000000..6d7ba2a
--- /dev/null
+++ b/pkg/compare/compare_test.go
@@ -0,0 +1,1541 @@
+package compare
+
+import (
+	"path"
+	"testing"
+
+	"github.com/CrowdStrike/gql/utils"
+
+	"github.com/vektah/gqlparser/v2/ast"
+	"github.com/vektah/gqlparser/v2/parser"
+)
+
+func TestCompareSchemaRoot(t *testing.T) {
+	tests := []struct {
+		name        string
+		oldSchema   string
+		newSchema   string
+		criticality Criticality
+		ChangeType  ChangeType
+	}{
+ { + name: "Schema root query type changed", + oldSchema: ` + schema { + query: RootQuery + mutation: RootMutation + }`, + newSchema: ` + schema { + query: RootQueryChanged + mutation: RootMutation + }`, + criticality: Breaking, + ChangeType: SchemaQueryTypeChanged, + }, + { + name: "Schema with root query type added", + oldSchema: ``, + newSchema: ` + schema { + query: RootQuery + }`, + criticality: NonBreaking, + ChangeType: SchemaQueryTypeChanged, + }, + { + name: "Schema with root query type removed", + oldSchema: ` + schema { + query: RootQuery + }`, + newSchema: ``, + criticality: Breaking, + ChangeType: SchemaQueryTypeChanged, + }, + { + name: "Schema root query type added", + oldSchema: ` + schema { + mutation: RootMutation + }`, + newSchema: ` + schema { + query: RootQuery + mutation: RootMutation + }`, + criticality: NonBreaking, + ChangeType: SchemaQueryTypeChanged, + }, + { + name: "Schema root query type removed", + oldSchema: ` + schema { + query: RootQuery + mutation: RootMutation + }`, + newSchema: ` + schema { + mutation: RootMutation + }`, + criticality: Breaking, + ChangeType: SchemaQueryTypeChanged, + }, + { + name: "Schema root mutation type changed", + oldSchema: ` + schema { + query: RootQuery + mutation: RootMutation + } + `, + newSchema: ` + schema { + query: RootQuery + mutation: RootMutationChanged + }`, + criticality: Breaking, + ChangeType: SchemaMutationTypeChanged, + }, + { + name: "Schema root mutation type added", + oldSchema: ` + schema { + query: RootQuery + }`, + newSchema: ` + schema { + query: RootQuery + mutation: RootMutation + }`, + criticality: NonBreaking, + ChangeType: SchemaMutationTypeChanged, + }, + { + name: "Schema root mutation type removed", + oldSchema: ` + schema { + query: RootQuery + mutation: RootMutation + }`, + newSchema: ` + schema { + query: RootQuery + }`, + criticality: Breaking, + ChangeType: SchemaMutationTypeChanged, + }, + { + name: "Schema root subscription type changed", + oldSchema: ` + schema { + query: RootQuery + subscription: RootSubscription + }`, + newSchema: ` + schema { + query: RootQuery + subscription: RootSubscriptionChanged + }`, + criticality: Breaking, + ChangeType: SchemaSubscriptionTypeChanged, + }, + { + name: "Schema root subscription type added", + oldSchema: ` + schema { + query: RootQuery + }`, + newSchema: ` + schema { + query: RootQuery + subscription: RootSubscription + }`, + criticality: NonBreaking, + ChangeType: SchemaSubscriptionTypeChanged, + }, + { + name: "Schema root subscription type removed", + oldSchema: ` + schema { + query: RootQuery + subscription: RootSubscription + }`, + newSchema: ` + schema { + query: RootQuery + }`, + criticality: Breaking, + ChangeType: SchemaSubscriptionTypeChanged, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareTypes(t *testing.T) { + tests := []struct { + name string + oldSchema string + 
newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "A new type added", + oldSchema: ` + type User { + name : String! + } + `, + newSchema: ` + type User { + name : String! + } + type Book { + name: String + } + `, + criticality: NonBreaking, + ChangeType: TypeAdded, + }, + { + name: "An existing type removed", + oldSchema: ` + type User { + name : String! + } + type Book { + name: String + } + `, + newSchema: ` + type User { + name : String! + } + `, + criticality: Breaking, + ChangeType: TypeRemoved, + }, + { + name: "An extended type added", + oldSchema: ` + type User { + name : String! + } + `, + newSchema: ` + type User { + name : String! + } + extend type Book { + name: String + } + `, + criticality: NonBreaking, + ChangeType: TypeAdded, + }, + { + name: "An existing extended type removed", + oldSchema: ` + type User { + name : String! + } + extend type Book { + name: String + } + `, + newSchema: ` + type User { + name : String! + } + `, + criticality: Breaking, + ChangeType: TypeRemoved, + }, + { + name: "A type kind changed", + oldSchema: ` + type User { + name : String! + } + `, + newSchema: ` + interface User { + name : String! + } + `, + criticality: Breaking, + ChangeType: TypeKindChanged, + }, + { + name: "A type description changed", + oldSchema: ` + """ + User description + """ + type User { + name : String! + } + `, + newSchema: ` + """ + User description changed + """ + type User { + name : String! + } + `, + criticality: NonBreaking, + ChangeType: TypeDescriptionChanged, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareObjectTypes(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Object type no longer implements interface", + oldSchema: ` + type Employee implements User{ + name : String! + } + `, + newSchema: ` + type Employee { + name : String! + } + `, + criticality: Breaking, + ChangeType: ObjectTypeInterfaceRemoved, + }, + { + name: "Object type implements new interface", + oldSchema: ` + type Employee { + name : String! + } + `, + newSchema: ` + type Employee implements User{ + name : String! 
+ } + `, + criticality: Dangerous, + ChangeType: ObjectTypeInterfaceAdded, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareFields(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Field type changed", + oldSchema: ` + type Employee { + name : String! + } + `, + newSchema: ` + type Employee { + name : Int! + } + `, + criticality: Breaking, + ChangeType: FieldTypeChanged, + }, + { + name: "Field type change from optional to required ", + oldSchema: ` + type Employee { + name : String + } + `, + newSchema: ` + type Employee { + name : String! + } + `, + criticality: NonBreaking, + ChangeType: FieldTypeChanged, + }, + { + name: "Field type change from required to optional", + oldSchema: ` + type Employee { + name : String! + } + `, + newSchema: ` + type Employee { + name : String + } + `, + criticality: Breaking, + ChangeType: FieldTypeChanged, + }, + { + name: "Field type change from required list to optional list", + oldSchema: ` + type Employee { + name : [String!] + } + `, + newSchema: ` + type Employee { + name : [String] + } + `, + criticality: Breaking, + ChangeType: FieldTypeChanged, + }, + { + name: "Field description changed", + oldSchema: ` + type Employee { + """ + field description + """ + name : String + } + `, + newSchema: ` + type Employee { + """ + field description changed + """ + name : String + } + `, + criticality: NonBreaking, + ChangeType: FieldDescriptionChanged, + }, + { + name: "Field deprecation added", + oldSchema: ` + type Employee { + name : String + newName: String! + } + `, + newSchema: ` + type Employee { + name : String @deprecated(reason: "use newName") + newName: String! + } + `, + criticality: Dangerous, + ChangeType: FieldDeprecationAdded, + }, + { + name: "Field deprecation removed", + oldSchema: ` + type Employee { + name : String @deprecated(reason: "some reason") + newName: String! + } + `, + newSchema: ` + type Employee { + name : String + newName: String! + } + `, + criticality: Dangerous, + ChangeType: FieldDeprecationRemoved, + }, + { + name: "Field deprecation reason changed", + oldSchema: ` + type Employee { + name : String @deprecated(reason: "some reason") + newName: String! + } + `, + newSchema: ` + type Employee { + name : String @deprecated(reason: "some reason changed") + newName: String! + } + `, + criticality: NonBreaking, + ChangeType: FieldDeprecationReasonChanged, + }, + { + name: "Field removed", + oldSchema: ` + type Employee { + name : String @deprecated(reason: "some reason") + newName: String! + } + `, + newSchema: ` + type Employee { + newName: String! 
+ } + `, + criticality: Breaking, + ChangeType: FieldRemoved, + }, + { + name: "Field added", + oldSchema: ` + type Employee { + name : String + } + `, + newSchema: ` + type Employee { + name : String + newName: String! + } + `, + criticality: NonBreaking, + ChangeType: FieldAdded, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareFieldArguments(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Field argument removed", + oldSchema: ` + type Query { + reviews(offset:Int, limit:Int) : String! + } + `, + newSchema: ` + type Query { + reviews(offset:Int) : String! + } + `, + criticality: Breaking, + ChangeType: FieldArgumentRemoved, + }, + { + name: "Optional Argument added to field", + oldSchema: ` + type Query { + reviews(offset:Int) : String! + } + `, + newSchema: ` + type Query { + reviews(offset:Int, limit:Int) : String! + } + `, + criticality: Dangerous, + ChangeType: FieldArgumentAdded, + }, + { + name: "required Argument added to field", + oldSchema: ` + type Query { + reviews(offset:Int) : String! + } + `, + newSchema: ` + type Query { + reviews(offset:Int, limit:Int!) : String! + } + `, + criticality: Breaking, + ChangeType: FieldArgumentAdded, + }, + { + name: "Field Argument type changed", + oldSchema: ` + type Query { + reviews(offset:Int, limit:Int!) : String! + } + `, + newSchema: ` + type Query { + reviews(offset:Int, limit:Float!) : String! + } + `, + criticality: Breaking, + ChangeType: FieldArgumentTypeChanged, + }, + { + name: "Field Argument type changed to required", + oldSchema: ` + type Query { + reviews(offset:Int, limit:Int) : String! + } + `, + newSchema: ` + type Query { + reviews(offset:Int, limit:Int!) : String! + } + `, + criticality: Breaking, + ChangeType: FieldArgumentTypeChanged, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareInputObjectFields(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Input object field removed", + oldSchema: ` + input UserInput{ + id : ID! + name : String! 
+ } + `, + newSchema: ` + input UserInput{ + id : ID! + } + `, + criticality: Breaking, + ChangeType: InputFieldRemoved, + }, + { + name: "Optional input field Added", + oldSchema: ` + input UserInput{ + id : ID! + } + `, + newSchema: ` + input UserInput{ + id : ID! + name : String + } + `, + criticality: Dangerous, + ChangeType: InputFieldAdded, + }, + { + name: "Required input field added", + oldSchema: ` + input UserInput{ + id : ID! + } + `, + newSchema: ` + input UserInput{ + id : ID! + name : String! + } + `, + criticality: Breaking, + ChangeType: InputFieldAdded, + }, + { + name: "Input field type changed to required", + oldSchema: ` + input UserInput{ + id : ID! + name : String + } + `, + newSchema: ` + input UserInput{ + id : ID! + name : String! + } + `, + criticality: Breaking, + ChangeType: InputFieldTypeChanged, + }, + { + name: "Input field type changed", + oldSchema: ` + input UserInput{ + id : ID! + name : String + } + `, + newSchema: ` + input UserInput{ + id : ID! + name : Name! + } + `, + criticality: Breaking, + ChangeType: InputFieldTypeChanged, + }, + { + name: "Input field type changed to optional", + oldSchema: ` + input UserInput{ + addresses : [String]! + } + `, + newSchema: ` + input UserInput{ + addresses : [String] + } + `, + criticality: NonBreaking, + ChangeType: InputFieldTypeChanged, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareEnumType(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Enum value removed", + oldSchema: ` + enum Color{ + RED + GREEN + } + `, + newSchema: ` + enum Color{ + RED + } + `, + criticality: Breaking, + ChangeType: EnumValueRemoved, + }, + { + name: "Enum value added", + oldSchema: ` + enum Color{ + RED + } + `, + newSchema: ` + enum Color{ + RED + GREEN + } + `, + criticality: Dangerous, + ChangeType: EnumValueAdded, + }, + { + name: "Enum description changed", + oldSchema: ` + enum Color{ + """ + Enum value description + """ + RED + } + `, + newSchema: ` + enum Color{ + """ + Enum value description changed + """ + RED + } + `, + criticality: NonBreaking, + ChangeType: EnumValueDescriptionChanged, + }, + { + name: "Enum deprecation added", + oldSchema: ` + enum Color{ + RED + GREEN + } + `, + newSchema: ` + enum Color{ + RED + GREEN @deprecated(reason:"some reason") + } + `, + criticality: Dangerous, + ChangeType: EnumValueDeprecationAdded, + }, + { + name: "Enum deprecation reason changed", + oldSchema: ` + enum Color{ + RED + GREEN @deprecated(reason:"some reason") + } + `, + newSchema: ` + enum Color{ + RED + GREEN @deprecated(reason:"some reason changed") + } + `, + criticality: NonBreaking, + ChangeType: EnumValueDeprecationReasonChanged, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := 
parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareUnions(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Union member removed", + oldSchema: ` + union Body = Image | Text + `, + newSchema: ` + union Body = Image + `, + criticality: Breaking, + ChangeType: UnionMemberRemoved, + }, + { + name: "Object type implements new interface", + oldSchema: ` + union Body = Image + `, + newSchema: ` + union Body = Image | Text + `, + criticality: Dangerous, + ChangeType: UnionMemberAdded, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareDirectives(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Directive removed", + oldSchema: ` + directive @stream on FIELD + directive @transform(from: String!) on FIELD + `, + newSchema: ` + directive @transform(from: String!) on FIELD + `, + criticality: Breaking, + ChangeType: DirectiveRemoved, + }, + { + name: "Directive added", + oldSchema: ` + directive @stream on FIELD + `, + newSchema: ` + directive @stream on FIELD + directive @transform(from: String!) on FIELD + `, + criticality: NonBreaking, + ChangeType: DirectiveAdded, + }, + { + name: "Directive description changed", + oldSchema: ` + """ + Some description + """ + directive @transform(from: String!) on FIELD + `, + newSchema: ` + """ + Some description changed + """ + directive @transform(from: String!) on FIELD + `, + criticality: NonBreaking, + ChangeType: DirectiveDescriptionChanged, + }, + { + name: "Directive location removed", + oldSchema: ` + directive @transform(from: String!) on OBJECT | INTERFACE + `, + newSchema: ` + directive @transform(from: String!) on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveLocationRemoved, + }, + { + name: "Directive location added", + oldSchema: ` + directive @transform(from: String!) on OBJECT + `, + newSchema: ` + directive @transform(from: String!) on OBJECT | INTERFACE + `, + criticality: NonBreaking, + ChangeType: DirectiveLocationAdded, + }, + { + name: "Directive repeatable removed", + oldSchema: ` + directive @transform(from: String!) repeatable on OBJECT + `, + newSchema: ` + directive @transform(from: String!) 
on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveRepeatableRemoved, + }, + { + name: "Directive repeatable added", + oldSchema: ` + directive @transform(from: String!) on OBJECT + `, + newSchema: ` + directive @transform(from: String!) repeatable on OBJECT + `, + criticality: NonBreaking, + ChangeType: DirectiveRepeatableAdded, + }, + { + name: "Directive argument removed", + oldSchema: ` + directive @transform(from: String, to:string) on OBJECT + `, + newSchema: ` + directive @transform(from: String) on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveArgumentRemoved, + }, + { + name: "Directive argument added", + oldSchema: ` + directive @transform(from: String) on OBJECT + `, + newSchema: ` + directive @transform(from: String, to:string) on OBJECT + `, + criticality: NonBreaking, + ChangeType: DirectiveArgumentAdded, + }, + { + name: "Directive required argument added", + oldSchema: ` + directive @transform(from: String) on OBJECT + `, + newSchema: ` + directive @transform(from: String, to:string!) on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveArgumentAdded, + }, + { + name: "Directive argument type changed", + oldSchema: ` + directive @transform(from: String) on OBJECT + `, + newSchema: ` + directive @transform(from: Int) on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveArgumentTypeChanged, + }, + { + name: "Directive argument type changed from required to optional", + oldSchema: ` + directive @transform(from: String!) on OBJECT + `, + newSchema: ` + directive @transform(from: String) on OBJECT + `, + criticality: NonBreaking, + ChangeType: DirectiveArgumentTypeChanged, + }, + { + name: "Directive argument type changed from optional to required", + oldSchema: ` + directive @transform(from: String) on OBJECT + `, + newSchema: ` + directive @transform(from: String!) on OBJECT + `, + criticality: Breaking, + ChangeType: DirectiveArgumentTypeChanged, + }, + { + name: "Directive argument default value changed", + oldSchema: ` + directive @transform(from: String = "value") on OBJECT + `, + newSchema: ` + directive @transform(from: String = "value changed") on OBJECT + `, + criticality: Dangerous, + ChangeType: DirectiveArgumentDefaultValueChanged, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareTypeDirectives(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Type directive removed", + oldSchema: ` + type Book @key(fields: "isbn") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveRemoved, + }, + { + name: "Type directive added", + oldSchema: ` + type Book { + isbn: String! 
+ title: String + } + `, + newSchema: ` + type Book @key(field1: "isbn") { + isbn: String! + title: String + } + `, + criticality: NonBreaking, + ChangeType: DirectiveAdded, + }, + { + name: "Type directive argument value changed", + oldSchema: ` + type Book @key(fields: "isbn") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book @key(fields: "isbn title") { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveArgumentValueChanged, + }, + { + name: "type directive argument removed", + oldSchema: ` + type Book @key(field1: "isbn", field2: "title") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book @key(field1: "isbn") { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveArgumentRemoved, + }, + { + name: "Type directive argument added", + oldSchema: ` + type Book @key(field1: "isbn") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book @key(field1: "isbn", field2: "title") { + isbn: String! + title: String + } + `, + criticality: NonBreaking, + ChangeType: DirectiveArgumentAdded, + }, + { + name: "one of the repetitive directive is removed ", + oldSchema: ` + type Book + @graph(type:"book", key: "isbn") + @graph(type:"library", key: "isbn") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book + @graph(type:"book", key: "isbn") { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveChanged, + }, + { + name: "repetitive directive is changed ", + oldSchema: ` + type Book + @graph(type:"book", key: "isbn") + @graph(type:"library", key: "isbn") { + isbn: String! + title: String + } + `, + newSchema: ` + type Book + @graph(type:"book", key: "isbn") + @graph(type:"user", key: "isbn") { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveChanged, + }, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + oldSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.oldSchema, + }) + if err != nil { + t.Fatalf("error parsing old schema, error = %v", err) + } + newSchema, err := parser.ParseSchema(&ast.Source{ + Input: tt.newSchema, + }) + if err != nil { + t.Fatalf("error parsing new schema, error = %v", err) + } + changes := FindChangesInSchemas(oldSchema, newSchema) + if len(changes) != 1 { + t.Errorf("Unexpected changes added = %v", changes) + } + if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType { + t.Errorf("Object type changes = %v", changes[0]) + } + }) + } +} + +func TestCompareFieldDirectives(t *testing.T) { + tests := []struct { + name string + oldSchema string + newSchema string + criticality Criticality + ChangeType ChangeType + }{ + { + name: "Field directive removed", + oldSchema: ` + type Book { + isbn: String! @exposure(scope: [PARTNER]) + title: String + } + `, + newSchema: ` + type Book { + isbn: String! + title: String + } + `, + criticality: Dangerous, + ChangeType: DirectiveRemoved, + }, + { + name: "Field directive added", + oldSchema: ` + type Book { + isbn: String! + title: String + } + `, + newSchema: ` + type Book { + isbn: String! @exposure(scope: [PARTNER]) + title: String + } + `, + criticality: NonBreaking, + ChangeType: DirectiveAdded, + }, + { + name: "Field directive argument value changed", + oldSchema: ` + type Book { + isbn: String! @exposure(scope: [PARTNER, PUBLIC]) + title: String + } + `, + newSchema: ` + type Book { + isbn: String! 
@exposure(scope: [PARTNER])
+					title: String
+				}
+			`,
+			criticality: Dangerous,
+			ChangeType:  DirectiveArgumentValueChanged,
+		},
+		{
+			name: "Field directive argument removed",
+			oldSchema: `
+				type Book {
+					isbn: String! @directive(field1: "name", field2: "title")
+					title: String
+				}
+			`,
+			newSchema: `
+				type Book {
+					isbn: String! @directive(field1: "name")
+					title: String
+				}
+			`,
+			criticality: Dangerous,
+			ChangeType:  DirectiveArgumentRemoved,
+		},
+		{
+			name: "Field directive argument added",
+			oldSchema: `
+				type Book {
+					isbn: String! @directive(field1: "name")
+					title: String
+				}
+			`,
+			newSchema: `
+				type Book {
+					isbn: String! @directive(field1: "name", field2: "title")
+					title: String
+				}
+			`,
+			criticality: NonBreaking,
+			ChangeType:  DirectiveArgumentAdded,
+		},
+	}
+
+	for _, tt := range tests {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			oldSchema, err := parser.ParseSchema(&ast.Source{
+				Input: tt.oldSchema,
+			})
+			if err != nil {
+				t.Fatalf("error parsing old schema, error = %v", err)
+			}
+			newSchema, err := parser.ParseSchema(&ast.Source{
+				Input: tt.newSchema,
+			})
+			if err != nil {
+				t.Fatalf("error parsing new schema, error = %v", err)
+			}
+			changes := FindChangesInSchemas(oldSchema, newSchema)
+			if len(changes) != 1 {
+				t.Errorf("Unexpected changes added = %v", changes)
+			}
+			if changes[0].criticalityLevel != tt.criticality || changes[0].changeType != tt.ChangeType {
+				t.Errorf("Object type changes = %v", changes[0])
+			}
+		})
+	}
+}
+
+func TestCompareSchemaFiles(t *testing.T) {
+	t.Run("Compare schema files", func(t *testing.T) {
+		sourceDir := "./test_schema"
+		schemaOldContents, err := utils.ReadFiles(path.Join(sourceDir, "oldSchema.graphql"))
+		if err != nil {
+			t.Fatalf("error reading oldSchema = %v", err)
+		}
+
+		schemaNewContents, err := utils.ReadFiles(path.Join(sourceDir, "newSchema.graphql"))
+		if err != nil {
+			t.Fatalf("error reading newSchema = %v", err)
+		}
+
+		schemaOld, parseErr := utils.ParseSchema(schemaOldContents)
+		if parseErr != nil {
+			t.Fatalf("error parsing oldSchema = %v", parseErr)
+		}
+
+		schemaNew, parseErr := utils.ParseSchema(schemaNewContents)
+		if parseErr != nil {
+			t.Fatalf("error parsing newSchema = %v", parseErr)
+		}
+
+		changes := FindChangesInSchemas(schemaOld, schemaNew)
+		if len(changes) != 7 {
+			t.Errorf("Unexpected changes added = %v", changes)
+		}
+	})
+}
diff --git a/pkg/compare/test_schema/newSchema.graphql b/pkg/compare/test_schema/newSchema.graphql
new file mode 100644
index 0000000..bcb1387
--- /dev/null
+++ b/pkg/compare/test_schema/newSchema.graphql
@@ -0,0 +1,17 @@
+type Query {
+    book(isbn: String!): [Book]
+    books: [Book]
+    library(id: ID!): Library
+}
+
+type Library {
+    id: ID!
+    name: String
+    books: [Book]!
+}
+
+type Book {
+    isbn: String!
+    title: String
+    similarBooks: [Book]!
+}
diff --git a/pkg/compare/test_schema/oldSchema.graphql b/pkg/compare/test_schema/oldSchema.graphql
new file mode 100644
index 0000000..ac89802
--- /dev/null
+++ b/pkg/compare/test_schema/oldSchema.graphql
@@ -0,0 +1,16 @@
+type Query {
+    book(isbn: String!): Book
+    library(id: ID!): Library
+}
+
+type Library {
+    id: ID!
+    name: String
+}
+
+type Book {
+    isbn: String
+    title: String!
+    year: Int
+    similarBooks: [Book]!
+}
diff --git a/pkg/linter/lint_error.go b/pkg/linter/lint_error.go
new file mode 100644
index 0000000..561a1a2
--- /dev/null
+++ b/pkg/linter/lint_error.go
@@ -0,0 +1,41 @@
+package linter
+
+import (
+	"sort"
+)
+
+// LintErrorWithMetadata represents a lint error. It stores metadata such as the rule that was violated, the position, and the underlying error.
+type LintErrorWithMetadata struct {
+	Rule         LintRule
+	Line, Column int
+	Err          error
+}
+
+// LintErrorsWithMetadata represents a collection of lint errors.
+type LintErrorsWithMetadata []LintErrorWithMetadata
+
+func (e LintErrorsWithMetadata) Len() int {
+	return len(e)
+}
+
+// Less reports whether the position at which error i occurred is before that of error j
+func (e LintErrorsWithMetadata) Less(i, j int) bool {
+	if e[i].Line < e[j].Line {
+		return true
+	}
+	if e[i].Line > e[j].Line {
+		return false
+	}
+	return e[i].Column < e[j].Column
+}
+
+// Swap swaps the errors at the two given indices
+func (e LintErrorsWithMetadata) Swap(i, j int) {
+	e[i], e[j] = e[j], e[i]
+}
+
+// GetSortedErrors returns the list of errors with metadata, sorted by line and then column
+func (e LintErrorsWithMetadata) GetSortedErrors() []LintErrorWithMetadata {
+	sort.Sort(e)
+	return e
+}
diff --git a/pkg/linter/rules.go b/pkg/linter/rules.go
new file mode 100644
index 0000000..56bdc5e
--- /dev/null
+++ b/pkg/linter/rules.go
@@ -0,0 +1,496 @@
+package linter
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/vektah/gqlparser/v2/ast"
+)
+
+var camelCaseRegex = regexp.MustCompile("^[a-z][a-zA-Z0-9]*$")
+
+// LintRuleFunc is the function signature that every lint rule function must have
+type LintRuleFunc = func(schema *ast.SchemaDocument) LintErrorsWithMetadata
+
+// LintRule is the internal name of a lint rule
+type LintRule string
+
+// LintRuleMetadata holds information for a given lint rule
+type LintRuleMetadata struct {
+	Name         LintRule
+	description  string
+	RuleFunction LintRuleFunc
+}
+
+// AvailableRulesWithDescription returns a newline-separated list of the available rules with their descriptions
+func AvailableRulesWithDescription() string {
+	availableRulesWithDescription := make([]string, 0)
+	for _, rule := range AllTheRules {
+		ruleWithDescription := fmt.Sprintf("  %s => %s", rule.Name, rule.description)
+		availableRulesWithDescription = append(availableRulesWithDescription, ruleWithDescription)
+	}
+	return strings.Join(availableRulesWithDescription, "\n")
+}
+
+const (
+	typeDesc      = "type-desc"
+	argsDesc      = "args-desc"
+	fieldDesc     = "field-desc"
+	enumCaps      = "enum-caps"
+	enumDesc      = "enum-desc"
+	fieldCamel    = "field-camel"
+	typeCaps      = "type-caps"
+	relayConnType = "relay-conn-type"
+	relayConnArgs = "relay-conn-args"
+)
+
+// AllTheRules is a list of all the lint rules available
+var AllTheRules = []LintRuleMetadata{
+	{
+		typeDesc,
+		"type-desc checks whether all the types defined have a description",
+		TypesHaveDescription,
+	},
+	{
+		argsDesc,
+		"args-desc checks whether arguments have a description",
+		ArgumentsHaveDescription,
+	},
+	{
+		fieldDesc,
+		"field-desc checks whether fields have a description",
+		FieldsHaveDescription,
+	},
+	{
+		enumCaps,
+		"enum-caps checks whether Enum values are all UPPER_CASE",
+		EnumValuesAreAllCaps,
+	},
+	{
+		enumDesc,
+		"enum-desc checks whether Enum values have a description",
+		EnumValuesHaveDescriptions,
+	},
+	{
+		fieldCamel,
+		"field-camel checks whether fields defined are all camelCase",
+		FieldsAreCamelCased,
+	},
+	{
+		typeCaps,
+		"type-caps checks whether types defined are Capitalized",
+		TypesAreCapitalized,
+	},
+	{
+		relayConnType,
+		"relay-conn-type checks if Connection Types follow the Relay Cursor Connections Specification",
+		RelayConnectionTypesSpec,
+	},
+	{
+		relayConnArgs,
+		"relay-conn-args checks if Connection Args follow section 4 of the Relay Cursor Connections Specification",
+		RelayConnectionArgumentsSpec,
+	},
+}
+
+// TypesHaveDescription checks whether all the types defined have a description
+func TypesHaveDescription(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		if len(definition.Description) == 0 {
+			lintError := LintErrorWithMetadata{
+				Rule:   typeDesc,
+				Line:   definition.Position.Line,
+				Column: definition.Position.Column,
+				Err:    fmt.Errorf("type %s does not have a description", definition.Name),
+			}
+			errors = append(errors, lintError)
+		}
+	}
+	// extended types should not have descriptions since that can collide with the type being extended
+	return errors
+}
+
+// ArgumentsHaveDescription checks whether arguments have a description
+func ArgumentsHaveDescription(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		if definition.IsCompositeType() {
+			for _, field := range definition.Fields {
+				for _, argument := range field.Arguments {
+					if len(argument.Description) == 0 {
+						lintError := LintErrorWithMetadata{
+							Rule:   argsDesc,
+							Line:   argument.Position.Line,
+							Column: argument.Position.Column,
+							Err:    fmt.Errorf("argument %s.%s.%s does not have a description", definition.Name, field.Name, argument.Name),
+						}
+						errors = append(errors, lintError)
+					}
+				}
+			}
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, definition := range schema.Extensions {
+		if definition.IsCompositeType() {
+			for _, field := range definition.Fields {
+				for _, argument := range field.Arguments {
+					if len(argument.Description) == 0 {
+						lintError := LintErrorWithMetadata{
+							Rule:   argsDesc,
+							Line:   argument.Position.Line,
+							Column: argument.Position.Column,
+							Err:    fmt.Errorf("argument %s.%s.%s does not have a description", definition.Name, field.Name, argument.Name),
+						}
+						errors = append(errors, lintError)
+					}
+				}
+			}
+		}
+	}
+	return errors
+}
+
+// FieldsHaveDescription checks whether fields have a description
+func FieldsHaveDescription(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		for _, fieldDefinition := range definition.Fields {
+			if len(fieldDefinition.Description) == 0 {
+				lintError := LintErrorWithMetadata{
+					Rule:   fieldDesc,
+					Line:   fieldDefinition.Type.Position.Line,
+					Column: fieldDefinition.Type.Position.Column,
+					Err:    fmt.Errorf("field %s.%s does not have a description", definition.Name, fieldDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, definition := range schema.Extensions {
+		for _, fieldDefinition := range definition.Fields {
+			if len(fieldDefinition.Description) == 0 {
+				lintError := LintErrorWithMetadata{
+					Rule:   fieldDesc,
+					Line:   fieldDefinition.Type.Position.Line,
+					Column: fieldDefinition.Type.Position.Column,
+					Err:    fmt.Errorf("field %s.%s does not have a description", definition.Name, fieldDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+		}
+	}
+	// ToDo: we should also disallow descriptions on fields with the @external directive. This is in line with gqlparser not allowing descriptions for extended types.
+
+	return errors
+}
+
+// EnumValuesAreAllCaps checks whether Enum values are all UPPER_CASE
+func EnumValuesAreAllCaps(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		if definition.Kind == ast.Enum {
+			for _, enumValue := range definition.EnumValues {
+				if strings.ToUpper(enumValue.Name) != enumValue.Name {
+					lintError := LintErrorWithMetadata{
+						Rule:   enumCaps,
+						Line:   enumValue.Position.Line,
+						Column: enumValue.Position.Column,
+						Err:    fmt.Errorf("enum value %s.%s is not uppercase", definition.Name, enumValue.Name),
+					}
+					errors = append(errors, lintError)
+				}
+			}
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, definition := range schema.Extensions {
+		if definition.Kind == ast.Enum {
+			for _, enumValue := range definition.EnumValues {
+				if strings.ToUpper(enumValue.Name) != enumValue.Name {
+					lintError := LintErrorWithMetadata{
+						Rule:   enumCaps,
+						Line:   enumValue.Position.Line,
+						Column: enumValue.Position.Column,
+						Err:    fmt.Errorf("extended enum value %s.%s is not uppercase", definition.Name, enumValue.Name),
+					}
+					errors = append(errors, lintError)
+				}
+			}
+		}
+	}
+	return errors
+}
+
+// EnumValuesHaveDescriptions checks whether Enum values have a description
+func EnumValuesHaveDescriptions(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		typeDefinition := schema.Definitions.ForName(definition.Name)
+		if typeDefinition.Kind == ast.Enum {
+			for _, enumValue := range typeDefinition.EnumValues {
+				if len(enumValue.Description) == 0 {
+					lintError := LintErrorWithMetadata{
+						Rule:   enumDesc,
+						Line:   enumValue.Position.Line,
+						Column: enumValue.Position.Column,
+						Err:    fmt.Errorf("enum value %s.%s does not have a description", typeDefinition.Name, enumValue.Name),
+					}
+					errors = append(errors, lintError)
+				}
+			}
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, definition := range schema.Extensions {
+		if definition.Kind == ast.Enum {
+			for _, enumValue := range definition.EnumValues {
+				if len(enumValue.Description) == 0 {
+					lintError := LintErrorWithMetadata{
+						Rule:   enumDesc,
+						Line:   enumValue.Position.Line,
+						Column: enumValue.Position.Column,
+						Err:    fmt.Errorf("extended enum value %s.%s does not have a description", definition.Name, enumValue.Name),
+					}
+					errors = append(errors, lintError)
+				}
+			}
+		}
+	}
+	return errors
+}
+
+// FieldsAreCamelCased checks whether fields defined are all camelCase
+func FieldsAreCamelCased(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, definition := range schema.Definitions {
+		for _, fieldDefinition := range definition.Fields {
+			if !camelCaseRegex.MatchString(fieldDefinition.Name) {
+				lintError := LintErrorWithMetadata{
+					Rule:   fieldCamel,
+					Line:   fieldDefinition.Type.Position.Line,
+					Column: fieldDefinition.Type.Position.Column,
+					Err:    fmt.Errorf("field %s.%s is not camelcased", definition.Name, fieldDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, definition := range schema.Extensions {
+		for _, fieldDefinition := range definition.Fields {
+			if !camelCaseRegex.MatchString(fieldDefinition.Name) {
+				lintError := LintErrorWithMetadata{
+					Rule:   fieldCamel,
+					Line:   fieldDefinition.Type.Position.Line,
+					Column: fieldDefinition.Type.Position.Column,
+					Err:    fmt.Errorf("field %s.%s is not camelcased", definition.Name, fieldDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+		}
+	}
+	return errors
+}
+
+// TypesAreCapitalized checks whether types defined are Capitalized
+func TypesAreCapitalized(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+	for _, typeDefinition := range schema.Definitions {
+		if typeDefinition.Name[0] >= 'a' && typeDefinition.Name[0] <= 'z' {
+			lintError := LintErrorWithMetadata{
+				Rule:   typeCaps,
+				Line:   typeDefinition.Position.Line,
+				Column: typeDefinition.Position.Column,
+				Err:    fmt.Errorf("type %s is not capitalized", typeDefinition.Name),
+			}
+			errors = append(errors, lintError)
+		}
+	}
+	// extended types are not included in schema.Definitions but in schema.Extensions
+	for _, typeDefinition := range schema.Extensions {
+		if typeDefinition.Name[0] >= 'a' && typeDefinition.Name[0] <= 'z' {
+			lintError := LintErrorWithMetadata{
+				Rule:   typeCaps,
+				Line:   typeDefinition.Position.Line,
+				Column: typeDefinition.Position.Column,
+				Err:    fmt.Errorf("extended type %s is not capitalized", typeDefinition.Name),
+			}
+			errors = append(errors, lintError)
+		}
+	}
+	return errors
+}
+
+// RelayConnectionTypesSpec will validate the schema adheres to section 2 (Connection Types) of the Relay Cursor Connections Specification.
+// See https://relay.dev/graphql/connections.htm#sec-Connection-Types and https://relay.dev/graphql/connections.htm
+func RelayConnectionTypesSpec(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+
+	for _, typeDefinition := range schema.Definitions {
+		if strings.HasSuffix(typeDefinition.Name, "Connection") {
+			if typeDefinition.Kind != ast.Object {
+				lintError := LintErrorWithMetadata{
+					Rule:   relayConnType,
+					Line:   typeDefinition.Position.Line,
+					Column: typeDefinition.Position.Column,
+					Err:    fmt.Errorf("%d:%d type %s cannot end with Connection as that suffix is reserved for object types", typeDefinition.Position.Line, typeDefinition.Position.Column, typeDefinition.Name),
+				}
+				errors = append(errors, lintError)
+				continue
+			}
+
+			var foundEdgesField, foundPageInfoField bool
+			for _, fieldDefinition := range typeDefinition.Fields {
+				if fieldDefinition.Name == "edges" {
+					foundEdgesField = true
+					if !isFieldListType(fieldDefinition) {
+						lintError := LintErrorWithMetadata{
+							Rule:   relayConnType,
+							Line:   fieldDefinition.Type.Position.Line,
+							Column: fieldDefinition.Type.Position.Column,
+							Err:    fmt.Errorf("%d:%d edges field from Connection type %s needs to return a list type", fieldDefinition.Type.Position.Line, fieldDefinition.Type.Position.Column, typeDefinition.Name),
+						}
+						errors = append(errors, lintError)
+					}
+
+				} else if fieldDefinition.Name == "pageInfo" {
+					foundPageInfoField = true
+
+					// comparing via Type.Name() accounts for extra spaces in the declaration, such as `PageInfo !`
+					if fieldDefinition.Type.Name() != "PageInfo" || !fieldDefinition.Type.NonNull || isFieldListType(fieldDefinition) {
+						lintError := LintErrorWithMetadata{
+							Rule:   relayConnType,
+							Line:   fieldDefinition.Type.Position.Line,
+							Column: fieldDefinition.Type.Position.Column,
+							Err:    fmt.Errorf("%d:%d pageInfo field from Connection type %s needs to return a non-null PageInfo object", fieldDefinition.Type.Position.Line, fieldDefinition.Type.Position.Column, typeDefinition.Name),
+						}
+						errors = append(errors, lintError)
+					}
+				}
+			}
+
+			if !foundEdgesField {
+				lintError := LintErrorWithMetadata{
+					Rule:   relayConnType,
+					Line:   typeDefinition.Position.Line,
+					Column: typeDefinition.Position.Column,
+					Err:    fmt.Errorf("%d:%d type %s is a Connection type and therefore needs to have a field named 'edges' that returns a list type", typeDefinition.Position.Line, typeDefinition.Position.Column, typeDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+
+			if !foundPageInfoField {
+				lintError := LintErrorWithMetadata{
+					Rule:   relayConnType,
+					Line:   typeDefinition.Position.Line,
+					Column: typeDefinition.Position.Column,
+					Err:    fmt.Errorf("%d:%d type %s is a Connection type and therefore needs to have a field named 'pageInfo' that returns a non-null PageInfo object", typeDefinition.Position.Line, typeDefinition.Position.Column, typeDefinition.Name),
+				}
+				errors = append(errors, lintError)
+			}
+		}
+	}
+
+	return errors
+}
+
+// RelayConnectionArgumentsSpec will validate the schema adheres to section 4 (Arguments) of the Relay Cursor Connections Specification.
+// See https://relay.dev/graphql/connections.htm#sec-Arguments and https://relay.dev/graphql/connections.htm
+func RelayConnectionArgumentsSpec(schema *ast.SchemaDocument) LintErrorsWithMetadata {
+	errors := make([]LintErrorWithMetadata, 0)
+
+	for _, typeDefinition := range schema.Definitions {
+		for _, fieldDefinition := range typeDefinition.Fields {
+			var firstArgument, afterArgument, lastArgument, beforeArgument *ast.ArgumentDefinition
+			if strings.HasSuffix(fieldDefinition.Type.Name(), "Connection") {
+				for _, argumentDefinition := range fieldDefinition.Arguments {
+					switch argumentDefinition.Name {
+					case "first":
+						firstArgument = argumentDefinition
+					case "after":
+						afterArgument = argumentDefinition
+					case "last":
+						lastArgument = argumentDefinition
+					case "before":
+						beforeArgument = argumentDefinition
+					}
+				}
+
+				hasForwardPagination := firstArgument != nil && afterArgument != nil
+				hasBackwardPagination := lastArgument != nil && beforeArgument != nil
+
+				if !hasForwardPagination && !hasBackwardPagination {
+					lintError := LintErrorWithMetadata{
+						Rule:   relayConnArgs,
+						Line:   fieldDefinition.Type.Position.Line,
+						Column: fieldDefinition.Type.Position.Column,
+						Err:    fmt.Errorf("%d:%d field %s returns a Connection type and therefore must include forward pagination arguments (`first` and `after`) and/or backward pagination arguments (`last` and `before`) as per the Relay spec", fieldDefinition.Type.Position.Line, fieldDefinition.Type.Position.Column, fieldDefinition.Name), // nolint: lll
+					}
+					errors = append(errors, lintError)
+				}
+
+				if firstArgument != nil {
+					if hasBackwardPagination {
+						if firstArgument.Type.NamedType == "" || firstArgument.Type.NonNull || firstArgument.Type.Name() != "Int" {
+							lintError := LintErrorWithMetadata{
+								Rule:   relayConnArgs,
+								Line:   firstArgument.Position.Line,
+								Column: firstArgument.Position.Column,
+								Err:    fmt.Errorf("%d:%d field %s returns a Connection type that has both forward and backward pagination and therefore `first` argument should take a nullable non-negative integer as per the Relay spec", firstArgument.Position.Line, firstArgument.Position.Column, fieldDefinition.Name),
+							}
+							errors = append(errors, lintError)
+						}
+					} else {
+						if isArgListType(firstArgument) || firstArgument.Type.Name() != "Int" {
+							lintError := LintErrorWithMetadata{
+								Rule:   relayConnArgs,
+								Line:   firstArgument.Position.Line,
+								Column: firstArgument.Position.Column,
+								Err:    fmt.Errorf("%d:%d field %s returns a Connection type and has forward pagination and therefore `first` argument should take a non-negative integer as per the Relay spec", firstArgument.Position.Line, firstArgument.Position.Column, fieldDefinition.Name),
+							}
+							errors = append(errors, lintError)
+						}
+					}
+				}
+
+				if lastArgument != nil {
+					if hasForwardPagination {
+						if isArgListType(lastArgument) || lastArgument.Type.NonNull || lastArgument.Type.Name() != "Int" {
+							lintError := LintErrorWithMetadata{
+								Rule:   relayConnArgs,
+								Line:   lastArgument.Position.Line,
+								Column: lastArgument.Position.Column,
+								Err:    fmt.Errorf("%d:%d field %s returns a Connection type that has both forward and backward pagination and therefore `last` argument should take a nullable non-negative integer as per the Relay spec", lastArgument.Position.Line, lastArgument.Position.Column, fieldDefinition.Name),
+							}
+							errors = append(errors, lintError)
+						}
+					} else {
+						if isArgListType(lastArgument) || lastArgument.Type.Name() != "Int" {
+							lintError := LintErrorWithMetadata{
+								Rule:   relayConnArgs,
+								Line:   lastArgument.Position.Line,
+								Column: lastArgument.Position.Column,
+								Err:    fmt.Errorf("%d:%d field %s returns a Connection type and has backward pagination and therefore `last` argument should take a non-negative integer as per the Relay spec", lastArgument.Position.Line, lastArgument.Position.Column, fieldDefinition.Name),
+							}
+							errors = append(errors, lintError)
+						}
+					}
+				}
+			}
+		}
+
+	}
+
+	return errors
+}
+
+func isFieldListType(fieldDefinition *ast.FieldDefinition) bool {
+	return fieldDefinition.Type.NamedType == "" && fieldDefinition.Type.Elem != nil
+}
+
+func isArgListType(fieldArgument *ast.ArgumentDefinition) bool {
+	return fieldArgument.Type.NamedType == "" && fieldArgument.Type.Elem != nil
+}
diff --git a/pkg/linter/rules_test.go b/pkg/linter/rules_test.go
new file mode 100644
index 0000000..4eb193a
--- /dev/null
+++ b/pkg/linter/rules_test.go
@@ -0,0 +1,711 @@
+package linter
+
+import (
+	"testing"
+
+	"github.com/vektah/gqlparser/v2/ast"
+	"github.com/vektah/gqlparser/v2/parser"
+)
+
+func TestArgumentsHaveDescription(t *testing.T) {
+	tests := []struct {
+		name    string
+		schema  string
+		wantErr bool
+	}{
+		{
+			"arguments_without_description",
+			`
+			type Query {
+				todos(offset: Int, limit: Int): [Todo!]!
+			}
+			`,
+			true,
+		},
+		{
+			"arguments_in_extended_type_without_description",
+			`
+			extend type Query {
+				todos(offset: Int, limit: Int): [Todo!]!
+			}
+			`,
+			true,
+		},
+		{
+			"arguments_in_extended_type_with_description",
+			`
+			extend type Query {
+				todos(
+					"some comment about offset"
+					offset: Int,
+					"some comment about limit"
+					limit: Int
+				): [Todo!]!
+			}
+			`,
+			false,
+		},
+		{
+			"arguments_with_description",
+			`
+			type Query {
+				todos(
+					"some comment about offset"
+					offset: Int,
+					"some comment about limit"
+					limit: Int
+				): [Todo!]!
+			}
+			`,
+			false,
+		},
+		{
+			"one_argument_with_description",
+			`
+			type Query {
+				todos(
+					"some comment about offset"
+					offset: Int,
+					limit: Int
+				): [Todo!]!
+ } + `, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("ArgumentsHaveDescription() invalid input; error = %v", parseErr) + } + if errs := ArgumentsHaveDescription(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("ArgumentsHaveDescription() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestEnumValuesAreAllCaps(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "enum_value_are_lowercase", + `enum color { red, blue, green }`, + true, + }, + { + "enum_value_are_titlecase", + `enum color { Red, Blue, Green }`, + true, + }, + { + "enum_value_are_uppercase", + `enum color { RED, BLUE, GREEN }`, + false, + }, + { + "enum_value_are_mixed", + `enum color { RED, Blue, green }`, + true, + }, + { + "extended_enum_value_are_uppercase", + `extend enum color { RED, BLUE, GREEN }`, + false, + }, + { + "extended_enum_value_are_mixed", + `extend enum color { RED, Blue, green }`, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("EnumValuesAreAllCaps() invalid input; error = %v", parseErr) + } + if errs := EnumValuesAreAllCaps(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("EnumValuesAreAllCaps() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestEnumValuesHaveDescriptions(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "enum_values_with_description", + ` + enum color { + "color red" + red, + "color blue" + blue, + "color green" + green + }`, + false, + }, + { + "enum_values_without_description", + ` + enum color { + red, + blue, + green + }`, + true, + }, + { + "some_enum_values_with_description", + ` + enum color { + red, + "color blue" + blue, + "color green" + green + }`, + true, + }, + { + "extended_enum_values_with_description", + ` + extend enum color { + "crowdstrike is red " + red, + "salesforce is blue" + blue, + "splunk is green" + green + }`, + false, + }, + { + "extended_enum_values_without_description", + ` + extend enum color { + red, + blue, + green + }`, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("EnumValuesHaveDescriptions() invalid input; error = %v", parseErr) + } + if errs := EnumValuesHaveDescriptions(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("EnumValuesHaveDescriptions() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestFieldsAreCamelCased(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "fields_are_camelCase", + `type Query { + todos: [Todo!]! + todoList: [Todo!]! + }`, + false, + }, + { + "fields_are_TitleCase", + `type Query { + TodoList: [Todo!]! + }`, + true, + }, + { + "fields_are_UPPERCASE", + `type Query { + TODOS: [Todo!]! + }`, + true, + }, + { + "extended_type_fields_are_camelCase", + `extend type Query { + todoList: [Todo!]! + }`, + false, + }, + { + "extended_type_fields_are_TitleCase", + `extend type Query { + TodoList: [Todo!]! + }`, + true, + }, + { + "extended_type_fields_are_UPPERCASE", + `extend type Query { + TODOS: [Todo!]! 
+ }`, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("FieldsAreCamelCased() invalid input; error = %v", parseErr) + } + if errs := FieldsAreCamelCased(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("FieldsAreCamelCased() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestFieldsHaveDescription(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "fields_without_description", + ` + type Query { + todos(offset: Int, limit: Int): [Todo!]! + } + `, + true, + }, + { + "fields_with_description", + ` + type Query { + "todos return list of todos" + todos(offset: Int, limit: Int): [Todo!]! + } + `, + false, + }, + { + "fields_of_extended_type_without_description", + ` + extend type Query { + todos(offset: Int, limit: Int): [Todo!]! + } + `, + true, + }, + { + "fields_of_extended_type_with_description", + ` + extend type Query { + "todos return list of todos" + todos(offset: Int, limit: Int): [Todo!]! + } + `, + false, + }, + { + "one_of_the_field_without_description", + ` + type Query { + "todos return list of todos" + todos(offset: Int, limit: Int): [Todo!]! + todoList(offset: Int, limit: Int): [Todo!]! + } + `, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("FieldsHaveDescription() invalid input; error = %v", parseErr) + } + if errs := FieldsHaveDescription(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("FieldsHaveDescription() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestTypesAreCapitalized(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "types_are_Capitalized", + `type User { + name: String! + }`, + false, + }, + { + "types_are_lowercased", + `type user { + name: String! + }`, + true, + }, + { + "types_are_mixed", + `type User { + name: String! + } + type query { + me: User! + }`, + true, + }, + { + "extended_type_is_lowercased", + `extend type user { + name: String! + }`, + true, + }, + { + "extended_types_is_Titlecased", + `extend type User { + name: String! + }`, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("TypesAreCapitalized() invalid input; error = %v", parseErr) + } + if errs := TypesAreCapitalized(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("TypesAreCapitalized() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestTypesHaveDescription(t *testing.T) { + t.Run("something", func(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "type_without_description", + ` + type User { + name: String! + } + `, + true, + }, + { + "type_with_description", + ` + "User represent the person calling the endpoint" + type User { + name: String! + } + `, + false, + }, + { + "extended_type_without_description", + ` + extend type User { + name: String! + } + `, + false, + }, + { + "some_of_the_types_without_description", + ` + "User represent the person calling the endpoint" + type User { + name: String! + } + type Query { + me: User! 
+ } + `, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("TypesHaveDescription() invalid input; error = %v", parseErr) + } + if errs := TypesHaveDescription(schemaDoc); (errs.Len() != 0) != tt.wantErr { + t.Errorf("TypesHaveDescription() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } + }) +} + +func TestRelayConnectionTypesSpec(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "type_connection_non_object_type", + ` + interface UserConnection { + edges: [User] + pageInfo: PageInfo! + } + `, + true, + }, + { + "type_connection_without_edges_and_pageinfo_fields", + ` + type UserConnection { + id: Int! + } + `, + true, + }, + { + "type_connection_with_non_list_edges_type", + ` + type UserConnection { + edges: String + pageInfo: PageInfo! + } + `, + true, + }, + { + "type_connection_with_nullable_pageinfo_type_for_pageinfo_field", + ` + type UserConnection { + edges: [SomeObject] + pageInfo: PageInfo + } + `, + true, + }, + { + "type_connection_with_nonpageinfo_type_for_pageinfo_field", + ` + type UserConnection { + edges: [SomeObject] + pageInfo: String! + } + `, + true, + }, + { + "type_connection_with_valid_edges_pageinfo_fields", + ` + type UserConnection { + edges: [SomeObject] + pageInfo: PageInfo! + } + `, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("RelayConnectionTypesSpec() invalid input; error = %v", parseErr) + } + if errs := RelayConnectionTypesSpec(schemaDoc); (errs.Len() > 0) != tt.wantErr { + t.Errorf("RelayConnectionTypesSpec() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} + +func TestRelayConnectionArgumentsSpec(t *testing.T) { + tests := []struct { + name string + schema string + wantErr bool + }{ + { + "field_connection_without_forward_or_backward_pagination", + ` + type User { + result: UserConnection + } + `, + true, + }, + { + "field_connection_with_forward_and_backward_pagination_and_non_nullable_first_argument", + ` + type User { + result(first: Int!, after: String, last: Int, before: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_forward_and_backward_pagination_and_non_nullable_last_argument", + ` + type User { + result(first: Int, after: String, last: Int!, before: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_non_int_first_argument", + ` + type User { + result(first: String, after: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_non_int_last_argument", + ` + type User { + result(last: String, before: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_only_first_argument", + ` + type User { + result(first: Int): UserConnection + } + `, + true, + }, + { + "field_connection_with_only_first_and_last_arguments", + ` + type User { + result(first: Int, last: Int): UserConnection + } + `, + true, + }, + { + "field_connection_with_only_first_and_before_arguments", + ` + type User { + result(first: Int, before: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_only_last_and_after_arguments", + ` + type User { + result(last: Int, after: String): UserConnection + } + `, + true, + }, + { + "field_connection_with_valid_forward_pagination", + ` + type User { + result(first: Int, 
after: String): UserConnection + } + `, + false, + }, + { + "field_connection_with_valid_forward_pagination_with_nullable_first_argument", + ` + type User { + result(first: Int!, after: String): UserConnection + } + `, + false, + }, + { + "field_connection_with_valid_backward_pagination", + ` + type User { + result(last: Int, before: String): UserConnection + } + `, + false, + }, + { + "field_connection_with_valid_backward_pagination_with_nullable_last_argument", + ` + type User { + result(last: Int!, before: String): UserConnection + } + `, + false, + }, + { + "field_connection_with_valid_forward_and_backward_pagination", + ` + type User { + result(first: Int, after: String, last: Int, before: String): UserConnection + } + `, + false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + schemaDoc, parseErr := parser.ParseSchema(&ast.Source{ + Input: tt.schema, + }) + if parseErr != nil { + t.Fatalf("RelayConnectionArgumentsSpec() invalid input; error = %v", parseErr) + } + if errs := RelayConnectionArgumentsSpec(schemaDoc); (errs.Len() > 0) != tt.wantErr { + t.Errorf("RelayConnectionTypesSpec() error = %v, wantErr %v", errs, tt.wantErr) + } + }) + } +} diff --git a/pull_request_template.md b/pull_request_template.md new file mode 100644 index 0000000..450a2b5 --- /dev/null +++ b/pull_request_template.md @@ -0,0 +1,30 @@ +> Please review our [Code of Conduct](https://github.com/CrowdStrike/gql/CODE_OF_CONDUCT.md) +> and our [Contribution Guidelines](https://github.com/CrowdStrike/gql/CONTRIBUTING.md) before submitting a Pull Request. + +> REMOVE ALL PULL REQUEST HINTS BEFORE SUBMITTING + +## PULL REQUEST TITLE +Pull Request general description should go here. +> Please fill out all values and then remove any help text before submitting your PR. + +- [ ] Enhancement +- [ ] Major Feature update +- [ ] Bug fixes +- [ ] Breaking Change +- [ ] Documentation + +> Check the values above that match your PR and remove the remaining. + +## Added features and functionality ++ If your PR adds features or functionality, what should be included in the next release notes? + +## Issues resolved ++ Bug fix: Please list related bugs individually. Identifying the ticket or bug report in the PR description auto-updates the affected ticket and helps the community with ticket triage. For example: + +* Fixes https://github.com/CrowdStrike/gql/issues/1234 by doing foo +* Mitigates https://github.com/CrowdStrike/gql/issues/5678 by updating bar. ++ BE EXPLICIT in what you are resolving + +## Other ++ List any other details here ++ Documentation regarding your changes can also be listed here diff --git a/question.md b/question.md new file mode 100644 index 0000000..9d40f65 --- /dev/null +++ b/question.md @@ -0,0 +1,25 @@ +--- +name: Question +about: Create a question to help `gql` improve and help others within the community. +title: "[ QUESTION ] ..." +labels: question +assignees: '' + +--- + +**Description of your question** +A clear and concise description of what the question is. + +**To Reproduce** +Steps to reproduce the behavior you are seeing. + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Environment (please complete the following information):** + - OS: [e.g. Windows Server 2016, Windows 10] + - golang: [e.g. 1.17] + - gql: [e.g. 0.1.0] + +**Additional context** +Add any other context about the problem here. 
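For reference, here is a minimal sketch of driving the lint rules above directly from Go. It assumes the module path github.com/CrowdStrike/gql and a hypothetical schema string; beyond those assumptions, it relies only on the exported pieces introduced in this patch (gqlparser's parser.ParseSchema, RelayConnectionArgumentsSpec, and GetSortedErrors).

package main

import (
	"fmt"

	"github.com/CrowdStrike/gql/pkg/linter"
	"github.com/vektah/gqlparser/v2/ast"
	"github.com/vektah/gqlparser/v2/parser"
)

func main() {
	// Hypothetical schema: a Connection field declared without any pagination arguments,
	// which should trigger the relay-conn-args rule.
	doc, err := parser.ParseSchema(&ast.Source{Input: `
		type User {
			result: UserConnection
		}
	`})
	if err != nil {
		panic(err)
	}
	// Run a single rule and print its findings in line/column order.
	for _, lintErr := range linter.RelayConnectionArgumentsSpec(doc).GetSortedErrors() {
		fmt.Printf("%s %d:%d %v\n", lintErr.Rule, lintErr.Line, lintErr.Column, lintErr.Err)
	}
}

The lint_cmd.go wiring in this patch presumably iterates over AllTheRules in the same fashion; the sketch just shows the smallest possible driver for one rule.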
diff --git a/utils/util.go b/utils/util.go
new file mode 100644
index 0000000..264ea87
--- /dev/null
+++ b/utils/util.go
@@ -0,0 +1,57 @@
+package utils
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/vektah/gqlparser/v2/ast"
+	"github.com/vektah/gqlparser/v2/parser"
+)
+
+// ParseSchema parses the given schema file contents and combines their sources into a single schema document
+func ParseSchema(schemaFileContents map[string][]byte) (*ast.SchemaDocument, error) {
+	var sources []*ast.Source
+	for fn, sf := range schemaFileContents {
+		s := &ast.Source{
+			Name:  fn,
+			Input: string(sf),
+		}
+		sources = append(sources, s)
+	}
+
+	schema, parseErr := parser.ParseSchemas(sources...)
+	if parseErr != nil {
+		return nil, parseErr
+	}
+	return schema, nil
+}
+
+// ReadFiles reads file contents from the given file path; glob patterns are supported
+func ReadFiles(schemaFilePath string) (map[string][]byte, error) {
+	schemaFiles, err := filepath.Glob(schemaFilePath)
+	if err != nil {
+		return nil, fmt.Errorf("matching files do not exist at path:%s, error:%v", schemaFilePath, err)
+	}
+	if len(schemaFiles) == 0 {
+		return nil, fmt.Errorf("matching file does not exist at path:%s", schemaFilePath)
+	}
+	schemaFileContents := make(map[string][]byte)
+	for _, filename := range schemaFiles {
+		fileObject, fileErr := os.Open(filename) // nolint:gosec
+		if fileErr != nil {
+			return nil, fmt.Errorf("failed to open file:%s on path:%s, error:%v", filename, schemaFilePath, fileErr)
+		}
+		content, readErr := io.ReadAll(fileObject)
+		_ = fileObject.Close() // the handle is no longer needed once the contents are read
+		if readErr != nil {
+			return nil, fmt.Errorf("failed to read file:%s on path:%s, error:%v", filename, schemaFilePath, readErr)
+		}
+		if len(content) == 0 {
+			fmt.Printf("empty file=%s in path=%s\n", filename, schemaFilePath)
+		}
+		schemaFileContents[filename] = content
+	}
+	return schemaFileContents, nil
+}
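A companion sketch for the schema-compare flow, combining utils.ReadFiles and utils.ParseSchema with compare.FindChangesInSchemas as exercised in TestCompareSchemaFiles above. The glob paths are hypothetical, and the module path github.com/CrowdStrike/gql is again an assumption; the change details themselves live in unexported fields, so an external caller can only count or print the changes.

package main

import (
	"fmt"
	"log"

	"github.com/CrowdStrike/gql/pkg/compare"
	"github.com/CrowdStrike/gql/utils"
)

func main() {
	// Hypothetical schema locations; ReadFiles accepts glob patterns.
	oldContents, err := utils.ReadFiles("schema/old/*.graphql")
	if err != nil {
		log.Fatal(err)
	}
	newContents, err := utils.ReadFiles("schema/new/*.graphql")
	if err != nil {
		log.Fatal(err)
	}

	oldSchema, err := utils.ParseSchema(oldContents)
	if err != nil {
		log.Fatal(err)
	}
	newSchema, err := utils.ParseSchema(newContents)
	if err != nil {
		log.Fatal(err)
	}

	// FindChangesInSchemas reports every difference between the two schema versions.
	changes := compare.FindChangesInSchemas(oldSchema, newSchema)
	fmt.Printf("found %d change(s)\n", len(changes))
}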