diff --git a/acto/__main__.py b/acto/__main__.py
index 9f16328747..16359bd74e 100644
--- a/acto/__main__.py
+++ b/acto/__main__.py
@@ -4,100 +4,101 @@
import logging
import os
import random
-import signal
import sys
import threading
-import time
from datetime import datetime
+from acto.engine import Acto, apply_testcase
+from acto.input.input import DeterministicInputModel
+from acto.lib.operator_config import OperatorConfig
+from acto.post_process.post_diff_test import PostDiffTest
+from acto.utils.error_handler import handle_excepthook, thread_excepthook
+from acto.utils.thread_logger import get_thread_logger
+
random.seed(0)
-start_time = time.time()
-workdir_path = 'testrun-%s' % datetime.now().strftime('%Y-%m-%d-%H-%M')
+workdir_path = f"testrun-{datetime.now().strftime('%Y-%m-%d-%H-%M')}"
parser = argparse.ArgumentParser(
- description='Automatic, Continuous Testing for k8s/openshift Operators')
-parser.add_argument('--workdir',
- dest='workdir_path',
- type=str,
- default=workdir_path,
- help='Working directory')
-parser.add_argument('--config', '-c', dest='config', help='Operator port config path')
+ description="Automatic, Continuous Testing for k8s/openshift Operators"
+)
parser.add_argument(
- '--cluster-runtime',
- '-r',
- dest='cluster_runtime',
+ "--workdir",
+ dest="workdir_path",
+ type=str,
+ default=workdir_path,
+ help="Working directory",
+)
+parser.add_argument(
+ "--config",
+ "-c",
+ dest="config",
+ help="Operator porting config path",
+ required=True,
+)
+parser.add_argument(
+ "--cluster-runtime",
+ "-r",
+ dest="cluster_runtime",
default="KIND",
- help='Cluster runtime for kubernetes, can be KIND (Default), K3D or MINIKUBE')
-parser.add_argument('--duration',
- '-d',
- dest='duration',
- required=False,
- help='Number of hours to run')
-parser.add_argument('--preload-images',
- dest='preload_images',
- nargs='*',
- help='Docker images to preload into Kind cluster')
-# Temporary solution before integrating controller-gen
-parser.add_argument('--helper-crd',
- dest='helper_crd',
- help='generated CRD file that helps with the input generation')
-parser.add_argument('--context', dest='context', help='Cached context data')
-parser.add_argument('--num-workers',
- dest='num_workers',
- type=int,
- default=1,
- help='Number of concurrent workers to run Acto with')
-parser.add_argument('--num-cases',
- dest='num_cases',
- type=int,
- default=1,
- help='Number of testcases to bundle each time')
-parser.add_argument('--learn', dest='learn', action='store_true', help='Learn mode')
-
-parser.add_argument('--additional-semantic',
- dest='additional_semantic',
- action='store_true',
- help='Run additional semantic testcases')
-parser.add_argument('--delta-from', dest='delta_from', help='Delta from')
-parser.add_argument('--notify-crash',
- dest='notify_crash',
- action='store_true',
- help='Submit a google form response to notify')
-parser.add_argument('--learn-analysis',
- dest='learn_analysis_only',
- action='store_true',
- help='Only learn analysis')
-parser.add_argument('--dryrun',
- dest='dryrun',
- action='store_true',
- help='Only generate test cases without executing them')
-parser.add_argument('--checkonly', action='store_true')
+ help="Cluster runtime for kubernetes, can be KIND (Default), K3D or MINIKUBE",
+)
+parser.add_argument("--context", dest="context", help="Cached context data")
+parser.add_argument(
+ "--num-workers",
+ dest="num_workers",
+ type=int,
+ default=1,
+ help="Number of concurrent workers to run Acto with",
+)
+parser.add_argument(
+ "--num-cases",
+ dest="num_cases",
+ type=int,
+ default=1,
+ help="Number of testcases to bundle each time",
+)
+parser.add_argument(
+ "--learn", dest="learn", action="store_true", help="Learn mode"
+)
+parser.add_argument("--delta-from", dest="delta_from", help="Delta from")
+parser.add_argument(
+ "--notify-crash",
+ dest="notify_crash",
+ action="store_true",
+ help="Submit a google form response to notify",
+)
+parser.add_argument(
+ "--learn-analysis",
+ dest="learn_analysis_only",
+ action="store_true",
+ help="Only learn analysis",
+)
+parser.add_argument(
+ "--dryrun",
+ dest="dryrun",
+ action="store_true",
+ help="Only generate test cases without executing them",
+)
+parser.add_argument(
+    "--preload-images",
+    dest="preload_images",
+    nargs="*",
+    help="Docker images to preload into Kind cluster",
+)
+# Temporary solution before integrating controller-gen
+parser.add_argument(
+    "--helper-crd",
+    dest="helper_crd",
+    help="generated CRD file that helps with the input generation",
+)
+parser.add_argument("--checkonly", action="store_true")
args = parser.parse_args()
os.makedirs(args.workdir_path, exist_ok=True)
# Setting up log infra
logging.basicConfig(
- filename=os.path.join(args.workdir_path, 'test.log'),
+ filename=os.path.join(args.workdir_path, "test.log"),
level=logging.DEBUG,
- filemode='w',
- format='%(asctime)s %(levelname)-7s, %(name)s, %(filename)-9s:%(lineno)d, %(message)s')
+ filemode="w",
+ format="%(asctime)s %(levelname)-7s, %(name)s, %(filename)-9s:%(lineno)d, %(message)s",
+)
logging.getLogger("kubernetes").setLevel(logging.ERROR)
logging.getLogger("sh").setLevel(logging.ERROR)
-with open(args.config, 'r') as config_file:
+with open(args.config, "r", encoding="utf-8") as config_file:
config = json.load(config_file)
- if 'monkey_patch' in config:
- importlib.import_module(config['monkey_patch'])
-
-from acto import common
-from acto.engine import Acto, apply_testcase
-from acto.input.input import DeterministicInputModel, InputModel
-from acto.lib.operator_config import OperatorConfig
-from acto.post_process.post_diff_test import PostDiffTest
-from acto.utils.error_handler import handle_excepthook, thread_excepthook
-from acto.utils.thread_logger import get_thread_logger
+ if "monkey_patch" in config:
+ importlib.import_module(config["monkey_patch"])
logger = get_thread_logger(with_prefix=False)
@@ -106,69 +107,61 @@
threading.excepthook = thread_excepthook
if args.notify_crash:
- logger.critical('Crash notification should be enabled in config.yaml')
+ logger.critical("Crash notification should be enabled in config.yaml")
-with open(args.config, 'r') as config_file:
+with open(args.config, "r", encoding="utf-8") as config_file:
config = json.load(config_file)
- if 'monkey_patch' in config:
- del config['monkey_patch']
- config = OperatorConfig(**config)
-logger.info('Acto started with [%s]' % sys.argv)
-logger.info('Operator config: %s', config)
+ if "monkey_patch" in config:
+ del config["monkey_patch"]
+ config = OperatorConfig.model_validate(config)
+logger.info("Acto started with [%s]", sys.argv)
+logger.info("Operator config: %s", config)
-# Preload frequently used images to amid ImagePullBackOff
+# Preload frequently used images to avoid ImagePullBackOff
if args.preload_images:
- logger.info('%s will be preloaded into Kind cluster', args.preload_images)
-
-# register timeout to automatically stop after # hours
-if args.duration != None:
- signal.signal(signal.SIGALRM, common.timeout_handler)
- signal.alarm(int(args.duration) * 60 * 60)
+ logger.info("%s will be preloaded into Kind cluster", args.preload_images)
-if args.context == None:
- context_cache = os.path.join(os.path.dirname(config.seed_custom_resource), 'context.json')
+if args.context is None:
+ context_cache = os.path.join(
+ os.path.dirname(config.seed_custom_resource), "context.json"
+ )
else:
context_cache = args.context
-# Initialize input model and the apply testcase function
-# input_model = InputModel(context_cache['crd']['body'], config.example_dir,
-# args.num_workers, args.num_cases, None)
-input_model = DeterministicInputModel
apply_testcase_f = apply_testcase
-is_reproduce = False
start_time = datetime.now()
-acto = Acto(workdir_path=args.workdir_path,
- operator_config=config,
- cluster_runtime=args.cluster_runtime,
- preload_images_=args.preload_images,
- context_file=context_cache,
- helper_crd=args.helper_crd,
- num_workers=args.num_workers,
- num_cases=args.num_cases,
- dryrun=args.dryrun,
- analysis_only=args.learn_analysis_only,
- is_reproduce=is_reproduce,
- input_model=input_model,
- apply_testcase_f=apply_testcase_f,
- delta_from=args.delta_from,
- focus_fields=config.focus_fields,)
+acto = Acto(
+ workdir_path=args.workdir_path,
+ operator_config=config,
+ cluster_runtime=args.cluster_runtime,
+ preload_images_=args.preload_images,
+ context_file=context_cache,
+ helper_crd=args.helper_crd,
+ num_workers=args.num_workers,
+ num_cases=args.num_cases,
+ dryrun=args.dryrun,
+ analysis_only=args.learn_analysis_only,
+ is_reproduce=False,
+ input_model=DeterministicInputModel,
+ apply_testcase_f=apply_testcase_f,
+ delta_from=args.delta_from,
+ focus_fields=config.focus_fields,
+)
generation_time = datetime.now()
-logger.info('Acto initialization finished in %s', generation_time - start_time)
-if args.additional_semantic:
- acto.run(modes=[InputModel.ADDITIONAL_SEMANTIC])
-elif not args.learn:
- acto.run(modes=['normal'])
+logger.info("Acto initialization finished in %s", generation_time - start_time)
+if not args.learn:
+ acto.run(modes=["normal"])
normal_finish_time = datetime.now()
-logger.info('Acto normal run finished in %s', normal_finish_time - start_time)
-logger.info('Start post processing steps')
+logger.info("Acto normal run finished in %s", normal_finish_time - start_time)
+logger.info("Start post processing steps")
# Post processing
-post_diff_test_dir = os.path.join(args.workdir_path, 'post_diff_test')
+post_diff_test_dir = os.path.join(args.workdir_path, "post_diff_test")
p = PostDiffTest(testrun_dir=args.workdir_path, config=config)
if not args.checkonly:
p.post_process(post_diff_test_dir, num_workers=args.num_workers)
p.check(post_diff_test_dir, num_workers=args.num_workers)
end_time = datetime.now()
-logger.info('Acto end to end finished in %s', end_time - start_time)
\ No newline at end of file
+logger.info("Acto end to end finished in %s", end_time - start_time)
diff --git a/acto/lib/operator_config.py b/acto/lib/operator_config.py
index 445f6d633c..9d50e37006 100644
--- a/acto/lib/operator_config.py
+++ b/acto/lib/operator_config.py
@@ -1,23 +1,24 @@
-from typing import Dict, List, Optional
+from typing import Optional
-from pydantic import BaseModel, Field
+import pydantic
DELEGATED_NAMESPACE = "__DELEGATED__"
-class ApplyStep(BaseModel, extra="forbid"):
+class ApplyStep(pydantic.BaseModel, extra="forbid"):
"""Configuration for each step of kubectl apply"""
- file: str = Field(description="Path to the file for kubectl apply")
- operator: bool = Field(
+ file: str = pydantic.Field(description="Path to the file for kubectl apply")
+ operator: bool = pydantic.Field(
description="If the file contains the operator deployment",
default=False,
)
- operator_container_name: Optional[str] = Field(
- description="The container name of the operator in the operator pod",
+ operator_container_name: Optional[str] = pydantic.Field(
+ description="The container name of the operator in the operator pod, "
+ "required if there are multiple containers in the operator pod",
default=None,
)
- namespace: Optional[str] = Field(
+ namespace: Optional[str] = pydantic.Field(
description="Namespace for applying the file. If not specified, "
+ "use the namespace in the file or Acto namespace. "
+ "If set to null, use the namespace in the file",
@@ -25,113 +26,116 @@ class ApplyStep(BaseModel, extra="forbid"):
)
-class WaitStep(BaseModel, extra="forbid"):
+class WaitStep(pydantic.BaseModel, extra="forbid"):
"""Configuration for each step of waiting for the operator"""
- duration: int = Field(
+ duration: int = pydantic.Field(
description="Wait for the specified seconds", default=10
)
-class DeployStep(BaseModel, extra="forbid"):
+class DeployStep(pydantic.BaseModel, extra="forbid"):
"""A step of deploying a resource"""
- apply: ApplyStep = Field(
+ apply: ApplyStep = pydantic.Field(
description="Configuration for each step of kubectl apply", default=None
)
- wait: WaitStep = Field(
+ wait: WaitStep = pydantic.Field(
description="Configuration for each step of waiting for the operator",
default=None,
)
# TODO: Add support for helm and kustomize
- # helm: str = Field(
+ # helm: str = pydantic.Field(
# description="Path to the file for helm install")
- # kustomize: str = Field(
+ # kustomize: str = pydantic.Field(
# description="Path to the file for kustomize build")
-class DeployConfig(BaseModel, extra="forbid"):
+class DeployConfig(pydantic.BaseModel, extra="forbid"):
"""Configuration for deploying the operator"""
- steps: List[DeployStep] = Field(
+ steps: list[DeployStep] = pydantic.Field(
description="Steps to deploy the operator", min_length=1
)
-class AnalysisConfig(BaseModel, extra="forbid"):
+class AnalysisConfig(pydantic.BaseModel, extra="forbid"):
"Configuration for static analysis"
- github_link: str = Field(
+ github_link: str = pydantic.Field(
description="HTTPS URL for cloning the operator repo"
)
- commit: str = Field(
+ commit: str = pydantic.Field(
description="Commit hash to specify the version to conduct static analysis"
)
- type: str = Field(description="Type name of the CR")
- package: str = Field(
+ type: str = pydantic.Field(description="Type name of the CR")
+ package: str = pydantic.Field(
description="Package name in which the type of the CR is defined"
)
- entrypoint: Optional[str] = Field(
+ entrypoint: Optional[str] = pydantic.Field(
description="The relative path of the main package for the operator, "
+ "required if the main is not in the root directory"
)
-class KubernetesEngineConfig(BaseModel, extra="forbid"):
+class KubernetesEngineConfig(pydantic.BaseModel, extra="forbid"):
"""Configuration for Kubernetes"""
- feature_gates: Dict[str, bool] = Field(
+ feature_gates: dict[str, bool] = pydantic.Field(
description="Path to the feature gates file", default=None
)
-class OperatorConfig(BaseModel, extra="forbid"):
+class OperatorConfig(pydantic.BaseModel, extra="forbid"):
"""Configuration for porting operators to Acto"""
deploy: DeployConfig
- analysis: Optional[AnalysisConfig] = Field(
+ analysis: Optional[AnalysisConfig] = pydantic.Field(
default=None, description="Configuration for static analysis"
)
- seed_custom_resource: str = Field(description="Path to the seed CR file")
- num_nodes: int = Field(
+ seed_custom_resource: str = pydantic.Field(
+ description="Path to the seed CR file"
+ )
+ num_nodes: int = pydantic.Field(
description="Number of workers in the Kubernetes cluster", default=4
)
- wait_time: int = Field(
+ wait_time: int = pydantic.Field(
description="Timeout duration (seconds) for the resettable timer for system convergence",
default=60,
)
collect_coverage: bool = False
- custom_oracle: Optional[str] = Field(
+ custom_oracle: Optional[str] = pydantic.Field(
default=None, description="Path to the custom oracle file"
)
- diff_ignore_fields: Optional[List[str]] = Field(default_factory=list)
- kubernetes_version: str = Field(
- default="v1.22.9", description="Kubernetes version"
+ diff_ignore_fields: Optional[list[str]] = pydantic.Field(
+ default_factory=list
+ )
+ kubernetes_version: str = pydantic.Field(
+ default="v1.28.0", description="Kubernetes version"
)
- kubernetes_engine: KubernetesEngineConfig = Field(
+ kubernetes_engine: KubernetesEngineConfig = pydantic.Field(
default=KubernetesEngineConfig(),
description="Configuration for the Kubernetes engine",
)
-
- monkey_patch: Optional[str] = Field(
+ monkey_patch: Optional[str] = pydantic.Field(
default=None, description="Path to the monkey patch file"
)
- custom_module: Optional[str] = Field(
+ custom_module: Optional[str] = pydantic.Field(
default=None,
description="Path to the custom module, in the Python module path format",
)
- crd_name: Optional[str] = Field(default=None, description="Name of the CRD")
- k8s_fields: Optional[str] = Field(
- default=None, description="Path to the k8s fields file"
+ crd_name: Optional[str] = pydantic.Field(
+ default=None,
+ description="Name of the CRD, required if there are multiple CRDs",
)
- example_dir: Optional[str] = Field(
+ example_dir: Optional[str] = pydantic.Field(
default=None, description="Path to the example dir"
)
- context: Optional[str] = Field(
+ context: Optional[str] = pydantic.Field(
default=None, description="Path to the context file"
)
- focus_fields: Optional[List[List[str]]] = Field(
+ focus_fields: Optional[list[list[str]]] = pydantic.Field(
default=None, description="List of focus fields"
)
diff --git a/data/strimzi-kafka-operator/config.json b/data/strimzi-kafka-operator/config.json
index 6d7473deac..1db5734ea4 100644
--- a/data/strimzi-kafka-operator/config.json
+++ b/data/strimzi-kafka-operator/config.json
@@ -11,6 +11,5 @@
},
"crd_name": "kafkas.kafka.strimzi.io",
-  "seed_custom_resource": "data/strimzi-kafka-operator/cr.yaml",
-  "k8s_fields": "data.strimzi-kafka-operator.k8s_mapping",
-  "monkey_patch": "data.strimzi-kafka-operator.monkey_patch"
-}
\ No newline at end of file
+  "seed_custom_resource": "data/strimzi-kafka-operator/cr.yaml"
+}
diff --git a/docs/port.md b/docs/port.md
index 66805b1b47..13e4c34896 100644
--- a/docs/port.md
+++ b/docs/port.md
@@ -1,7 +1,7 @@
# Testing a new operator
## Porting an operator to Acto
-To port a new operator to Acto and test it, users would need to create a configuration file in JSON
+To port a new operator to Acto and test it, users would need to create a configuration file in JSON
format following the steps below.
### Providing the steps to deploy the operator
@@ -9,7 +9,7 @@ The minimum requirement for Acto to test an operator is to provide a way to depl
Acto supports three different ways for specifying the deployment method: YAML, Helm, and Kustomize.
(Helm and Kustomize is lacking support right now, please first use YAML)
-To specify operators' deployment method in a YAML way, users need to bundle all the required
+To specify operators' deployment method in a YAML way, users need to bundle all the required
resources into a YAML file, e.g. Namespace, ClusterRole, ServiceAccount, and Deployment.
Deploying operator can be expressed as a sequence of steps to be applied through
@@ -52,7 +52,7 @@ In case there are more than one container in the operator Pod (e.g. metrics expo
Full JsonSchema for the deploy property
-
+
```json
"deploy": {
"additionalProperties": false,
@@ -62,6 +62,7 @@ In case there are more than one container in the operator Pod (e.g. metrics expo
"description": "Steps to deploy the operator",
"items": {
"additionalProperties": false,
+ "description": "A step of deploying a resource",
"properties": {
"apply": {
"allOf": [
@@ -90,7 +91,7 @@ In case there are more than one container in the operator Pod (e.g. metrics expo
}
],
"default": null,
- "description": "The container name of the operator in the operator pod",
+ "description": "The container name of the operator in the operator pod, required if there are multiple containers in the operator pod",
"title": "Operator Container Name"
},
"namespace": {
@@ -151,8 +152,9 @@ In case there are more than one container in the operator Pod (e.g. metrics expo
],
"title": "DeployConfig",
"type": "object"
-},
+}
```
+
### Providing the name of the CRD to be tested
@@ -161,7 +163,7 @@ Some operator developers define separate CRDs for other purposes, e.g., backup t
In case there are more than one CRD in the deploy steps, you need to specify the full name of
the CRD to be tested.
-Specify the name of the CRD to be tested in the configuration through the `crd_name` property.
+Specify the name of the CRD to be tested in the configuration through the `crd_name` property.
E.g.:
```json
{
@@ -170,7 +172,7 @@ E.g.:
```
### Providing a seed CR for Acto to start with
-Provide a sample CR which will be used by Acto as the seed.
+Provide a sample CR which will be used by Acto as the seed.
This can be any valid CR, usually operator repos contain multiple sample CRs.
Specify this through the `seed_custom_resource` property in the configuration.
@@ -201,6 +203,654 @@ Example:
}
```
+
+ Full JsonSchema for the Operator Config
+
+ ```json
+{
+ "$defs": {
+ "AnalysisConfig": {
+ "additionalProperties": false,
+ "description": "Configuration for static analysis",
+ "properties": {
+ "github_link": {
+ "description": "HTTPS URL for cloning the operator repo",
+ "title": "Github Link",
+ "type": "string"
+ },
+ "commit": {
+ "description": "Commit hash to specify the version to conduct static analysis",
+ "title": "Commit",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type name of the CR",
+ "title": "Type",
+ "type": "string"
+ },
+ "package": {
+ "description": "Package name in which the type of the CR is defined",
+ "title": "Package",
+ "type": "string"
+ },
+ "entrypoint": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The relative path of the main package for the operator, required if the main is not in the root directory",
+ "title": "Entrypoint"
+ }
+ },
+ "required": [
+ "github_link",
+ "commit",
+ "type",
+ "package",
+ "entrypoint"
+ ],
+ "title": "AnalysisConfig",
+ "type": "object"
+ },
+ "ApplyStep": {
+ "additionalProperties": false,
+ "description": "Configuration for each step of kubectl apply",
+ "properties": {
+ "file": {
+ "description": "Path to the file for kubectl apply",
+ "title": "File",
+ "type": "string"
+ },
+ "operator": {
+ "default": false,
+ "description": "If the file contains the operator deployment",
+ "title": "Operator",
+ "type": "boolean"
+ },
+ "operator_container_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The container name of the operator in the operator pod, required if there are multiple containers in the operator pod",
+ "title": "Operator Container Name"
+ },
+ "namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": "__DELEGATED__",
+ "description": "Namespace for applying the file. If not specified, use the namespace in the file or Acto namespace. If set to null, use the namespace in the file",
+ "title": "Namespace"
+ }
+ },
+ "required": [
+ "file"
+ ],
+ "title": "ApplyStep",
+ "type": "object"
+ },
+ "DeployConfig": {
+ "additionalProperties": false,
+ "description": "Configuration for deploying the operator",
+ "properties": {
+ "steps": {
+ "description": "Steps to deploy the operator",
+ "items": {
+ "additionalProperties": false,
+ "description": "A step of deploying a resource",
+ "properties": {
+ "apply": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of kubectl apply",
+ "properties": {
+ "file": {
+ "description": "Path to the file for kubectl apply",
+ "title": "File",
+ "type": "string"
+ },
+ "operator": {
+ "default": false,
+ "description": "If the file contains the operator deployment",
+ "title": "Operator",
+ "type": "boolean"
+ },
+ "operator_container_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The container name of the operator in the operator pod, required if there are multiple containers in the operator pod",
+ "title": "Operator Container Name"
+ },
+ "namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": "__DELEGATED__",
+ "description": "Namespace for applying the file. If not specified, use the namespace in the file or Acto namespace. If set to null, use the namespace in the file",
+ "title": "Namespace"
+ }
+ },
+ "required": [
+ "file"
+ ],
+ "title": "ApplyStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of kubectl apply"
+ },
+ "wait": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of waiting for the operator",
+ "properties": {
+ "duration": {
+ "default": 10,
+ "description": "Wait for the specified seconds",
+ "title": "Duration",
+ "type": "integer"
+ }
+ },
+ "title": "WaitStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of waiting for the operator"
+ }
+ },
+ "title": "DeployStep",
+ "type": "object"
+ },
+ "minItems": 1,
+ "title": "Steps",
+ "type": "array"
+ }
+ },
+ "required": [
+ "steps"
+ ],
+ "title": "DeployConfig",
+ "type": "object"
+ },
+ "DeployStep": {
+ "additionalProperties": false,
+ "description": "A step of deploying a resource",
+ "properties": {
+ "apply": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of kubectl apply",
+ "properties": {
+ "file": {
+ "description": "Path to the file for kubectl apply",
+ "title": "File",
+ "type": "string"
+ },
+ "operator": {
+ "default": false,
+ "description": "If the file contains the operator deployment",
+ "title": "Operator",
+ "type": "boolean"
+ },
+ "operator_container_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The container name of the operator in the operator pod, required if there are multiple containers in the operator pod",
+ "title": "Operator Container Name"
+ },
+ "namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": "__DELEGATED__",
+ "description": "Namespace for applying the file. If not specified, use the namespace in the file or Acto namespace. If set to null, use the namespace in the file",
+ "title": "Namespace"
+ }
+ },
+ "required": [
+ "file"
+ ],
+ "title": "ApplyStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of kubectl apply"
+ },
+ "wait": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of waiting for the operator",
+ "properties": {
+ "duration": {
+ "default": 10,
+ "description": "Wait for the specified seconds",
+ "title": "Duration",
+ "type": "integer"
+ }
+ },
+ "title": "WaitStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of waiting for the operator"
+ }
+ },
+ "title": "DeployStep",
+ "type": "object"
+ },
+ "KubernetesEngineConfig": {
+ "additionalProperties": false,
+ "description": "Configuration for Kubernetes",
+ "properties": {
+ "feature_gates": {
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "default": null,
+ "description": "Path to the feature gates file",
+ "title": "Feature Gates",
+ "type": "object"
+ }
+ },
+ "title": "KubernetesEngineConfig",
+ "type": "object"
+ },
+ "WaitStep": {
+ "additionalProperties": false,
+ "description": "Configuration for each step of waiting for the operator",
+ "properties": {
+ "duration": {
+ "default": 10,
+ "description": "Wait for the specified seconds",
+ "title": "Duration",
+ "type": "integer"
+ }
+ },
+ "title": "WaitStep",
+ "type": "object"
+ }
+ },
+ "additionalProperties": false,
+ "description": "Configuration for porting operators to Acto",
+ "properties": {
+ "deploy": {
+ "additionalProperties": false,
+ "description": "Configuration for deploying the operator",
+ "properties": {
+ "steps": {
+ "description": "Steps to deploy the operator",
+ "items": {
+ "additionalProperties": false,
+ "description": "A step of deploying a resource",
+ "properties": {
+ "apply": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of kubectl apply",
+ "properties": {
+ "file": {
+ "description": "Path to the file for kubectl apply",
+ "title": "File",
+ "type": "string"
+ },
+ "operator": {
+ "default": false,
+ "description": "If the file contains the operator deployment",
+ "title": "Operator",
+ "type": "boolean"
+ },
+ "operator_container_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "The container name of the operator in the operator pod, required if there are multiple containers in the operator pod",
+ "title": "Operator Container Name"
+ },
+ "namespace": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": "__DELEGATED__",
+ "description": "Namespace for applying the file. If not specified, use the namespace in the file or Acto namespace. If set to null, use the namespace in the file",
+ "title": "Namespace"
+ }
+ },
+ "required": [
+ "file"
+ ],
+ "title": "ApplyStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of kubectl apply"
+ },
+ "wait": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for each step of waiting for the operator",
+ "properties": {
+ "duration": {
+ "default": 10,
+ "description": "Wait for the specified seconds",
+ "title": "Duration",
+ "type": "integer"
+ }
+ },
+ "title": "WaitStep",
+ "type": "object"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for each step of waiting for the operator"
+ }
+ },
+ "title": "DeployStep",
+ "type": "object"
+ },
+ "minItems": 1,
+ "title": "Steps",
+ "type": "array"
+ }
+ },
+ "required": [
+ "steps"
+ ],
+ "title": "DeployConfig",
+ "type": "object"
+ },
+ "analysis": {
+ "anyOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for static analysis",
+ "properties": {
+ "github_link": {
+ "description": "HTTPS URL for cloning the operator repo",
+ "title": "Github Link",
+ "type": "string"
+ },
+ "commit": {
+ "description": "Commit hash to specify the version to conduct static analysis",
+ "title": "Commit",
+ "type": "string"
+ },
+ "type": {
+ "description": "Type name of the CR",
+ "title": "Type",
+ "type": "string"
+ },
+ "package": {
+ "description": "Package name in which the type of the CR is defined",
+ "title": "Package",
+ "type": "string"
+ },
+ "entrypoint": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "description": "The relative path of the main package for the operator, required if the main is not in the root directory",
+ "title": "Entrypoint"
+ }
+ },
+ "required": [
+ "github_link",
+ "commit",
+ "type",
+ "package",
+ "entrypoint"
+ ],
+ "title": "AnalysisConfig",
+ "type": "object"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Configuration for static analysis"
+ },
+ "seed_custom_resource": {
+ "description": "Path to the seed CR file",
+ "title": "Seed Custom Resource",
+ "type": "string"
+ },
+ "num_nodes": {
+ "default": 4,
+ "description": "Number of workers in the Kubernetes cluster",
+ "title": "Num Nodes",
+ "type": "integer"
+ },
+ "wait_time": {
+ "default": 60,
+ "description": "Timeout duration (seconds) for the resettable timer for system convergence",
+ "title": "Wait Time",
+ "type": "integer"
+ },
+ "collect_coverage": {
+ "default": false,
+ "title": "Collect Coverage",
+ "type": "boolean"
+ },
+ "custom_oracle": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Path to the custom oracle file",
+ "title": "Custom Oracle"
+ },
+ "diff_ignore_fields": {
+ "anyOf": [
+ {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "title": "Diff Ignore Fields"
+ },
+ "kubernetes_version": {
+ "default": "v1.28.0",
+ "description": "Kubernetes version",
+ "title": "Kubernetes Version",
+ "type": "string"
+ },
+ "kubernetes_engine": {
+ "allOf": [
+ {
+ "additionalProperties": false,
+ "description": "Configuration for Kubernetes",
+ "properties": {
+ "feature_gates": {
+ "additionalProperties": {
+ "type": "boolean"
+ },
+ "default": null,
+ "description": "Path to the feature gates file",
+ "title": "Feature Gates",
+ "type": "object"
+ }
+ },
+ "title": "KubernetesEngineConfig",
+ "type": "object"
+ }
+ ],
+ "default": {
+ "feature_gates": null
+ },
+ "description": "Configuration for the Kubernetes engine"
+ },
+ "monkey_patch": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Path to the monkey patch file",
+ "title": "Monkey Patch"
+ },
+ "custom_module": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Path to the custom module, in the Python module path format",
+ "title": "Custom Module"
+ },
+ "crd_name": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Name of the CRD, required if there are multiple CRDs",
+ "title": "Crd Name"
+ },
+ "example_dir": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Path to the example dir",
+ "title": "Example Dir"
+ },
+ "context": {
+ "anyOf": [
+ {
+ "type": "string"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "Path to the context file",
+ "title": "Context"
+ },
+ "focus_fields": {
+ "anyOf": [
+ {
+ "items": {
+ "items": {
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "type": "array"
+ },
+ {
+ "type": "null"
+ }
+ ],
+ "default": null,
+ "description": "List of focus fields",
+ "title": "Focus Fields"
+ }
+ },
+ "required": [
+ "deploy",
+ "seed_custom_resource"
+ ],
+ "title": "OperatorConfig",
+ "type": "object"
+}
+ ```
+
+
+
## Run Acto's test campaign
After creating the configuration file for the operator,
users can start the test campaign by invoking Acto: