From 86aa0d3d616deb71aafbc58728565cb302db6602 Mon Sep 17 00:00:00 2001 From: Danilo Tuler Date: Wed, 18 Sep 2024 09:48:18 -0400 Subject: [PATCH] feat(cli): build based on configuration --- apps/cli/.eslintrc.cjs | 6 +- apps/cli/package.json | 3 +- apps/cli/src/baseCommand.ts | 7 + apps/cli/src/builder/directory.ts | 65 +++ apps/cli/src/builder/docker.ts | 151 +++++++ apps/cli/src/builder/empty.ts | 40 ++ apps/cli/src/builder/index.ts | 32 ++ apps/cli/src/builder/none.ts | 17 + apps/cli/src/builder/tar.ts | 49 +++ apps/cli/src/commands/build.ts | 421 +++++++------------- apps/cli/src/commands/shell.ts | 2 +- apps/cli/src/config.ts | 409 +++++++++++++++++++ apps/cli/src/exec.ts | 100 +++++ apps/cli/test/config.test.ts | 86 ++++ apps/cli/test/configs/default.toml | 2 + apps/cli/test/configs/drives/basic.toml | 7 + apps/cli/test/configs/drives/data.toml | 7 + apps/cli/test/configs/drives/empty.toml | 7 + apps/cli/test/configs/drives/none.toml | 6 + apps/cli/test/configs/drives/rives.toml | 15 + apps/cli/test/configs/drives/tar.toml | 6 + apps/cli/test/configs/full.toml | 45 +++ apps/cli/test/configs/machine/bootargs.toml | 0 apps/cli/test/configs/machine/no_boot.toml | 5 + apps/cli/test/tsconfig.json | 6 - apps/cli/tsconfig.build.json | 7 + apps/cli/tsconfig.json | 3 +- pnpm-lock.yaml | 9 + 28 files changed, 1232 insertions(+), 281 deletions(-) create mode 100644 apps/cli/src/builder/directory.ts create mode 100644 apps/cli/src/builder/docker.ts create mode 100644 apps/cli/src/builder/empty.ts create mode 100644 apps/cli/src/builder/index.ts create mode 100644 apps/cli/src/builder/none.ts create mode 100644 apps/cli/src/builder/tar.ts create mode 100644 apps/cli/src/config.ts create mode 100644 apps/cli/src/exec.ts create mode 100644 apps/cli/test/config.test.ts create mode 100644 apps/cli/test/configs/default.toml create mode 100644 apps/cli/test/configs/drives/basic.toml create mode 100644 apps/cli/test/configs/drives/data.toml create mode 100644 apps/cli/test/configs/drives/empty.toml create mode 100644 apps/cli/test/configs/drives/none.toml create mode 100644 apps/cli/test/configs/drives/rives.toml create mode 100644 apps/cli/test/configs/drives/tar.toml create mode 100644 apps/cli/test/configs/full.toml create mode 100644 apps/cli/test/configs/machine/bootargs.toml create mode 100644 apps/cli/test/configs/machine/no_boot.toml delete mode 100644 apps/cli/test/tsconfig.json create mode 100644 apps/cli/tsconfig.build.json diff --git a/apps/cli/.eslintrc.cjs b/apps/cli/.eslintrc.cjs index e12fe18a..e0e394b8 100644 --- a/apps/cli/.eslintrc.cjs +++ b/apps/cli/.eslintrc.cjs @@ -6,7 +6,11 @@ module.exports = { ], parser: "@typescript-eslint/parser", parserOptions: { - project: ["./tsconfig.eslint.json", "./tsconfig.json"], + project: [ + "./tsconfig.eslint.json", + "./tsconfig.json", + "./test/tsconfig.json", + ], tsconfigRootDir: __dirname, }, }; diff --git a/apps/cli/package.json b/apps/cli/package.json index 23ad0753..ed3a35f9 100644 --- a/apps/cli/package.json +++ b/apps/cli/package.json @@ -37,6 +37,7 @@ "ora": "^8.1.0", "progress-stream": "^2.0.0", "semver": "^7.6.3", + "smol-toml": "^1.3.0", "tmp": "^0.2.3", "viem": "^2.21.9" }, @@ -84,7 +85,7 @@ "clean": "rimraf dist", "codegen": "run-p codegen:wagmi", "codegen:wagmi": "wagmi generate", - "compile": "tsc -b", + "compile": "tsc -p tsconfig.build.json", "copy-files": "copyfiles -u 1 \"src/**/*.yaml\" \"src/**/*.env\" \"src/**/*.txt\" dist", "lint": "eslint \"src/**/*.ts*\"", "postpack": "rimraf oclif.manifest.json", diff --git 
a/apps/cli/src/baseCommand.ts b/apps/cli/src/baseCommand.ts index b01c4457..ff922e06 100644 --- a/apps/cli/src/baseCommand.ts +++ b/apps/cli/src/baseCommand.ts @@ -5,6 +5,7 @@ import fs from "fs"; import path from "path"; import { Address, Hash, getAddress, isHash } from "viem"; +import { Config, parse } from "./config.js"; import { authorityHistoryPairFactoryAddress, cartesiDAppFactoryAddress, @@ -54,6 +55,12 @@ export abstract class BaseCommand extends Command { return path.join(".cartesi", ...paths); } + protected getApplicationConfig(configPath: string): Config { + return fs.existsSync(configPath) + ? parse(fs.readFileSync(configPath).toString()) + : parse(""); + } + protected getMachineHash(): Hash | undefined { // read hash of the cartesi machine snapshot, if one exists const hashPath = this.getContextPath("image", "hash"); diff --git a/apps/cli/src/builder/directory.ts b/apps/cli/src/builder/directory.ts new file mode 100644 index 00000000..e9618e46 --- /dev/null +++ b/apps/cli/src/builder/directory.ts @@ -0,0 +1,65 @@ +import fs from "fs-extra"; +import path from "path"; +import { DirectoryDriveConfig } from "../config.js"; +import { execaDockerFallback } from "../exec.js"; + +export const build = async ( + name: string, + drive: DirectoryDriveConfig, + sdkImage: string, + destination: string, +): Promise => { + const filename = `${name}.${drive.format}`; + const blockSize = 4096; // fixed at 4k + const extraBlocks = Math.ceil(drive.extraSize / blockSize); + const extraSize = `+${extraBlocks}`; + + // copy directory to destination + const dest = path.join(destination, name); + await fs.mkdirp(dest); + await fs.copy(drive.directory, dest); + + try { + switch (drive.format) { + case "ext2": { + const command = "xgenext2fs"; + const args = [ + "--block-size", + blockSize.toString(), + "--faketime", + "--root", + name, + "--readjustment", + extraSize, + ]; + await execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + }); + break; + } + case "sqfs": { + const compression = "lzo"; // make customizable? default is gzip + const command = "mksquashfs"; + const args = [ + "-all-time", + "0", + "-all-root", // XXX: should we use this? + "-noappend", + "-comp", + compression, + "-no-progress", + name, + filename, + ]; + await execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + }); + } + } + } finally { + // delete copied + await fs.remove(dest); + } +}; diff --git a/apps/cli/src/builder/docker.ts b/apps/cli/src/builder/docker.ts new file mode 100644 index 00000000..e182724b --- /dev/null +++ b/apps/cli/src/builder/docker.ts @@ -0,0 +1,151 @@ +import { execa } from "execa"; +import fs from "fs-extra"; +import path from "path"; +import tmp from "tmp"; +import { DockerDriveConfig } from "../config.js"; +import { execaDockerFallback, spawnSyncDockerFallback } from "../exec.js"; +import { tarToExt } from "./index.js"; + +type ImageBuildOptions = Pick< + DockerDriveConfig, + "dockerfile" | "tags" | "target" +>; + +type ImageInfo = { + cmd: string[]; + entrypoint: string[]; + env: string[]; + workdir: string; +}; + +/** + * Build Docker image (linux/riscv64). Returns image id. 
+ */ +const buildImage = async (options: ImageBuildOptions): Promise => { + const { dockerfile, tags, target } = options; + const buildResult = tmp.tmpNameSync(); + const args = [ + "buildx", + "build", + "--file", + dockerfile, + "--load", + "--iidfile", + buildResult, + ]; + + // set tags for the image built + args.push(...tags.map((tag) => ["--tag", tag]).flat()); + + if (target) { + args.push("--target", target); + } + + await execa("docker", [...args, process.cwd()], { stdio: "inherit" }); + return fs.readFileSync(buildResult, "utf8"); +}; + +/** + * Query the image using docker image inspect + * @param image image id or name + * @returns Information about the image + */ +const getImageInfo = async (image: string): Promise => { + const { stdout: jsonStr } = await execa("docker", [ + "image", + "inspect", + image, + ]); + // parse image info from docker inspect output + const [imageInfo] = JSON.parse(jsonStr); + + // validate image architecture (must be riscv64) + if (imageInfo["Architecture"] !== "riscv64") { + throw new Error( + `Invalid image Architecture: ${imageInfo["Architecture"]}. Expected riscv64`, + ); + } + + const info: ImageInfo = { + cmd: imageInfo["Config"]["Cmd"] ?? [], + entrypoint: imageInfo["Config"]["Entrypoint"] ?? [], + env: imageInfo["Config"]["Env"] || [], + workdir: imageInfo["Config"]["WorkingDir"], + }; + + return info; +}; + +export const build = async ( + name: string, + drive: DockerDriveConfig, + sdkImage: string, + destination: string, +): Promise => { + const { format } = drive; + + const ocitar = `${name}.oci.tar`; + const tar = `${name}.tar`; + const filename = `${name}.${format}`; + + // use pre-existing image or build docker image + const image = drive.image || (await buildImage(drive)); + + // get image info + const imageInfo = await getImageInfo(image); + + try { + // create OCI Docker tarball from Docker image + await execa("docker", ["image", "save", image, "-o", ocitar], { + cwd: destination, + }); + + // create rootfs tar from OCI tar + await spawnSyncDockerFallback("crane", ["export", "-", "-"], { + stdio: [ + fs.openSync(path.join(destination, ocitar), "r"), + fs.openSync(path.join(destination, tar), "w"), + "inherit", + ], + image: sdkImage, + }); + + switch (format) { + case "ext2": { + // create ext2 + await tarToExt(tar, filename, format, drive.extraSize, { + cwd: destination, + image: sdkImage, + }); + break; + } + case "sqfs": { + const compression = "lzo"; // make customizable? default is gzip + const command = "mksquashfs"; + const args = [ + "-tar", + "-all-time", + "0", + "-all-root", // XXX: should we use this? 
+ "-noappend", + "-comp", + compression, + "-no-progress", + filename, + ]; + await execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + inputFile: tar, + }); + break; + } + } + } finally { + // delete intermediate files + // await fs.remove(path.join(destination, ocitar)); + // await fs.remove(path.join(destination, tar)); + } + + return imageInfo; +}; diff --git a/apps/cli/src/builder/empty.ts b/apps/cli/src/builder/empty.ts new file mode 100644 index 00000000..861a6ba2 --- /dev/null +++ b/apps/cli/src/builder/empty.ts @@ -0,0 +1,40 @@ +import fs from "fs-extra"; +import path from "path"; +import { EmptyDriveConfig } from "../config.js"; +import { execaDockerFallback } from "../exec.js"; + +export const build = async ( + name: string, + drive: EmptyDriveConfig, + sdkImage: string, + destination: string, +): Promise => { + const filename = `${name}.${drive.format}`; + switch (drive.format) { + case "ext2": { + const blockSize = 4096; // fixed at 4k + const size = Math.ceil(drive.size / blockSize); // size in blocks + const command = "xgenext2fs"; + const args = [ + "--block-size", + blockSize.toString(), + "--faketime", + "--size-in-blocks", + size.toString(), + filename, + ]; + await execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + }); + break; + } + case "raw": { + await fs.writeFile( + path.join(destination, filename), + Buffer.alloc(drive.size), + ); + break; + } + } +}; diff --git a/apps/cli/src/builder/index.ts b/apps/cli/src/builder/index.ts new file mode 100644 index 00000000..6ad19b73 --- /dev/null +++ b/apps/cli/src/builder/index.ts @@ -0,0 +1,32 @@ +import { execaDockerFallback, ExecaOptionsDockerFallback } from "../exec.js"; + +export { build as buildDirectory } from "./directory.js"; +export { build as buildDocker } from "./docker.js"; +export { build as buildEmpty } from "./empty.js"; +export { build as buildNone } from "./none.js"; +export { build as buildTar } from "./tar.js"; + +export const tarToExt = async ( + input: string, + output: string, + format: "ext2", + extraSize: number, + options: ExecaOptionsDockerFallback, +) => { + const blockSize = 4096; // fixed at 4k + const extraBlocks = Math.ceil(extraSize / blockSize); + const adjustment = `+${extraBlocks}`; + + const command = "xgenext2fs"; + const args = [ + "--block-size", + blockSize.toString(), + "--faketime", + "--readjustment", + adjustment.toString(), + "--tarball", + input, + output, + ]; + return execaDockerFallback(command, args, options); +}; diff --git a/apps/cli/src/builder/none.ts b/apps/cli/src/builder/none.ts new file mode 100644 index 00000000..2e0d8fea --- /dev/null +++ b/apps/cli/src/builder/none.ts @@ -0,0 +1,17 @@ +import fs from "fs-extra"; +import path from "path"; +import { ExistingDriveConfig, getDriveFormat } from "../config.js"; + +export const build = async ( + name: string, + drive: ExistingDriveConfig, + destination: string, +): Promise => { + // no need to build, drive already exists + const src = drive.filename; + const format = getDriveFormat(src); + const filename = path.join(destination, `${name}.${format}`); + + // just copy it + await fs.copyFile(src, filename); +}; diff --git a/apps/cli/src/builder/tar.ts b/apps/cli/src/builder/tar.ts new file mode 100644 index 00000000..4fad8e26 --- /dev/null +++ b/apps/cli/src/builder/tar.ts @@ -0,0 +1,49 @@ +import fs from "fs-extra"; +import path from "path"; +import { TarDriveConfig } from "../config.js"; +import { execaDockerFallback } from "../exec.js"; +import { tarToExt } from 
"./index.js"; + +export const build = async ( + name: string, + drive: TarDriveConfig, + sdkImage: string, + destination: string, +): Promise => { + const tar = `${name}.tar`; + const filename = `${name}.${drive.format}`; + + // copy input tar to destination directory (with drive name) + await fs.copy(drive.filename, path.join(destination, tar)); + + switch (drive.format) { + case "ext2": { + await tarToExt(tar, filename, drive.format, drive.extraSize, { + cwd: destination, + image: sdkImage, + }); + break; + } + case "sqfs": { + const compression = "lzo"; // make customizable? default is gzip + const command = "mksquashfs"; + const args = [ + "-tar", + "-all-time", + "0", + "-all-root", // XXX: should we use this? + "-noappend", + "-comp", + compression, + "-no-progress", + filename, + ]; + await execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + inputFile: tar, + }); + break; + } + } +}; diff --git a/apps/cli/src/commands/build.ts b/apps/cli/src/commands/build.ts index 47a85dcd..5301f774 100644 --- a/apps/cli/src/commands/build.ts +++ b/apps/cli/src/commands/build.ts @@ -1,312 +1,193 @@ import { Flags } from "@oclif/core"; -import bytes from "bytes"; -import { execa } from "execa"; import fs from "fs-extra"; -import semver from "semver"; +import path from "path"; import tmp from "tmp"; - import { BaseCommand } from "../baseCommand.js"; -import { DEFAULT_TEMPLATES_BRANCH } from "./create.js"; - -type ImageBuildOptions = { - target?: string; -}; +import { + buildDirectory, + buildDocker, + buildEmpty, + buildNone, + buildTar, +} from "../builder/index.js"; +import { Config, DriveConfig } from "../config.js"; +import { execaDockerFallback } from "../exec.js"; type ImageInfo = { cmd: string[]; - dataSize: string; entrypoint: string[]; env: string[]; - ramSize: string; - sdkVersion: string; - sdkName: string; workdir: string; }; -const CARTESI_LABEL_PREFIX = "io.cartesi.rollups"; -const CARTESI_LABEL_RAM_SIZE = `${CARTESI_LABEL_PREFIX}.ram_size`; -const CARTESI_LABEL_DATA_SIZE = `${CARTESI_LABEL_PREFIX}.data_size`; -const CARTESI_DEFAULT_RAM_SIZE = "128Mi"; - -const CARTESI_LABEL_SDK_VERSION = `${CARTESI_LABEL_PREFIX}.sdk_version`; -const CARTESI_LABEL_SDK_NAME = `${CARTESI_LABEL_PREFIX}.sdk_name`; -const CARTESI_DEFAULT_SDK_VERSION = "0.9.0"; - -export default class BuildApplication extends BaseCommand< - typeof BuildApplication -> { - static summary = "Build application."; - - static description = - "Build application starting from a Dockerfile and ending with a snapshot of the corresponding Cartesi Machine already booted and yielded for the first time. This snapshot can be used to start a Cartesi node for the application using `run`. 
The process can also start from a Docker image built by the developer using `docker build` using the option `--from-image`"; - - static examples = [ - "<%= config.bin %> <%= command.id %>", - "<%= config.bin %> <%= command.id %> --from-image my-app", - ]; - - static args = {}; - - static flags = { - "from-image": Flags.string({ - summary: "skip docker build and start from this image.", - description: - "if the build process of the application Dockerfile needs more control the developer can build the image using the `docker build` command, and then start the build process of the Cartesi machine starting from that image.", - }), - target: Flags.string({ - summary: "target of docker multi-stage build.", - description: - "if the application Dockerfile uses a multi-stage strategy, and stage of the image to be exported as a Cartesi machine is not the last stage, use this parameter to specify the target stage.", - }), - }; - - /** - * Build DApp image (linux/riscv64). Returns image id. - * @param directory path of context containing Dockerfile - */ - private async buildImage(options: ImageBuildOptions): Promise { - const buildResult = tmp.tmpNameSync(); - this.debug( - `building docker image and writing result to ${buildResult}`, - ); - const args = ["buildx", "build", "--load", "--iidfile", buildResult]; - if (options.target) { - args.push("--target", options.target); +type DriveResult = ImageInfo | undefined | void; + +const buildDrive = async ( + name: string, + drive: DriveConfig, + sdkImage: string, + destination: string, +): Promise => { + switch (drive.builder) { + case "directory": { + return buildDirectory(name, drive, sdkImage, destination); } - - await execa("docker", [...args, process.cwd()], { stdio: "inherit" }); - return fs.readFileSync(buildResult, "utf8"); - } - - private async getImageInfo(image: string): Promise { - const { stdout: jsonStr } = await execa("docker", [ - "image", - "inspect", - image, - ]); - // parse image info from docker inspect output - const [imageInfo] = JSON.parse(jsonStr); - - // validate image architecture (must be riscv64) - if (imageInfo["Architecture"] !== "riscv64") { - throw new Error( - `Invalid image Architecture: ${imageInfo["Architecture"]}. Expected riscv64`, - ); + case "docker": { + return buildDocker(name, drive, sdkImage, destination); } - - const labels = imageInfo["Config"]["Labels"] || {}; - const info: ImageInfo = { - cmd: imageInfo["Config"]["Cmd"] ?? [], - dataSize: labels[CARTESI_LABEL_DATA_SIZE] ?? "10Mb", - entrypoint: imageInfo["Config"]["Entrypoint"] ?? [], - env: imageInfo["Config"]["Env"] || [], - ramSize: labels[CARTESI_LABEL_RAM_SIZE] ?? CARTESI_DEFAULT_RAM_SIZE, - sdkName: labels[CARTESI_LABEL_SDK_NAME] ?? "cartesi/sdk", - sdkVersion: - labels[CARTESI_LABEL_SDK_VERSION] ?? - CARTESI_DEFAULT_SDK_VERSION, - workdir: imageInfo["Config"]["WorkingDir"], - }; - - if (!info.entrypoint && !info.cmd) { - throw new Error("Undefined image ENTRYPOINT or CMD"); + case "empty": { + return buildEmpty(name, drive, sdkImage, destination); } - - // fail if using unsupported sdk version - if (!semver.valid(info.sdkVersion)) { - this.warn("sdk version is not a valid semver"); - } else if ( - info.sdkName == "cartesi/sdk" && - semver.lt(info.sdkVersion, CARTESI_DEFAULT_SDK_VERSION) - ) { - throw new Error(`Unsupported sdk version: ${info.sdkVersion} (used) < ${CARTESI_DEFAULT_SDK_VERSION} (minimum). 
-Update your application Dockerfile using one of the templates at https://github.com/cartesi/application-templates/tree/${DEFAULT_TEMPLATES_BRANCH} -`); + case "tar": { + return buildTar(name, drive, sdkImage, destination); } - - // warn for using default values - info.sdkVersion || - this.warn( - `Undefined ${CARTESI_LABEL_SDK_VERSION} label, defaulting to ${CARTESI_DEFAULT_SDK_VERSION}`, - ); - - info.ramSize || - this.warn( - `Undefined ${CARTESI_LABEL_RAM_SIZE} label, defaulting to ${CARTESI_DEFAULT_RAM_SIZE}`, - ); - - // validate data size value - if (bytes(info.dataSize) === null) { - throw new Error( - `Invalid ${CARTESI_LABEL_DATA_SIZE} value: ${info.dataSize}`, - ); + case "none": { + return buildNone(name, drive, destination); } - - // XXX: validate other values - - return info; } +}; - // saves the OCI Image to a tarball - private async createTarball( - image: string, - outputFilePath: string, - ): Promise { - // create docker tarball from app image - await execa("docker", ["image", "save", image, "-o", outputFilePath]); +const bootMachine = async ( + config: Config, + info: ImageInfo | undefined, + sdkImage: string, + destination: string, +) => { + const { machine } = config; + const { assertRollingTemplate, maxMCycle, noRollup, ramLength, ramImage } = + machine; + + // list of environment variables of docker image + const env = info?.env ?? []; + const envs = env.map( + (variable) => `--append-entrypoint=export "${variable}"`, + ); + + // bootargs from config string array + const bootargs = machine.bootargs.map( + (arg) => `--append-bootargs="${arg}"`, + ); + + // entrypoint from config or image info (Docker ENTRYPOINT + CMD) + const entrypoint = + machine.entrypoint ?? // takes priority + (info ? [...info.entrypoint, ...info.cmd].join(" ") : undefined); // ENTRYPOINT and CMD as a space separated string + + if (!entrypoint) { + throw new Error("Undefined machine entrypoint"); } - // this wraps the call to the sdk image with a one-shot approach - // the (inputPath, outputPath) signature will mount the input as a volume and copy the output with docker cp - private async sdkRun( - sdkImage: string, - cmd: string[], - inputPath: string, - outputPath: string, - ): Promise { - const { stdout: cid } = await execa("docker", [ - "container", - "create", - "--volume", - `./${inputPath}:/tmp/input`, - sdkImage, - ...cmd, - ]); - - await execa("docker", ["container", "start", "-a", cid], { - stdio: "inherit", - }); - - await execa("docker", [ - "container", - "cp", - `${cid}:/tmp/output`, - outputPath, - ]); - - await execa("docker", ["container", "stop", cid]); - await execa("docker", ["container", "rm", cid]); + const flashDrives = Object.entries(config.drives).map(([label, drive]) => { + const { format, mount, shared, user } = drive; + // TODO: filename should be absolute dir inside docker container + const filename = `${label}.${format}`; + const vars = [`label:${label}`, `filename:${filename}`]; + if (mount) { + vars.push(`mount:${mount}`); + } + if (user) { + vars.push(`user:${user}`); + } + if (shared) { + vars.push("shared"); + } + // don't specify start and length + return `--flash-drive=${vars.join(",")}`; + }); + + // command to change working directory if WORKDIR is defined + const command = "cartesi-machine"; + const args = [ + ...bootargs, + ...envs, + ...flashDrives, + `--ram-image=${ramImage}`, + `--ram-length=${ramLength}`, + "--final-hash", + "--store=image", + `--append-entrypoint=${entrypoint}`, + ]; + if (info?.workdir) { + 
args.push(`--append-init=WORKDIR="${info.workdir}"`); } - - // returns the command to create rootfs tarball from an OCI Image tarball - private static createRootfsTarCommand(): string[] { - const cmd = [ - "cat", - "/tmp/input", - "|", - "crane", - "export", - "-", // OCI Image from stdin - "-", // rootfs tarball to stdout - "|", - "bsdtar", - "-cf", - "/tmp/output", - "--format=gnutar", - "@/dev/stdin", // rootfs tarball from stdin - ]; - return ["/usr/bin/env", "bash", "-c", cmd.join(" ")]; + if (noRollup) { + args.push("--no-rollup"); } - - // returns the command to create ext2 from a rootfs - private static createExt2Command(extraBytes: number): string[] { - const blockSize = 4096; - const extraBlocks = Math.ceil(extraBytes / blockSize); - const extraSize = `+${extraBlocks}`; - - return [ - "xgenext2fs", - "--tarball", - "/tmp/input", - "--block-size", - blockSize.toString(), - "--faketime", - "-r", - extraSize, - "/tmp/output", - ]; + if (maxMCycle) { + args.push(`--max-mcycle=${maxMCycle.toString()}`); + } + if (assertRollingTemplate) { + args.push("--assert-rolling-template"); } - private static createMachineSnapshotCommand(info: ImageInfo): string[] { - const ramSize = info.ramSize; - const driveLabel = "root"; // XXX: does this need to be customizable? + return execaDockerFallback(command, args, { + cwd: destination, + image: sdkImage, + stdio: "inherit", + }); +}; - // list of environment variables of docker image - const envs = info.env.map((variable) => `--env=${variable}`); +export default class Build extends BaseCommand { + static summary = "Build application."; - // ENTRYPOINT and CMD as a space separated string - const entrypoint = [...info.entrypoint, ...info.cmd].join(" "); + static description = + "Build application by building Cartesi machine drives, configuring a machine and booting it"; - // command to change working directory if WORKDIR is defined - const cwd = info.workdir ? 
`--workdir=${info.workdir}` : ""; - return [ - "create_machine_snapshot", - `--ram-length=${ramSize}`, - `--drive-label=${driveLabel}`, - `--drive-filename=/tmp/input`, - `--output=/tmp/output`, - cwd, - ...envs, - `--entrypoint=${entrypoint}`, - ]; - } + static examples = ["<%= config.bin %> <%= command.id %>"]; - public async run(): Promise { - const { flags } = await this.parse(BuildApplication); + static flags = { + config: Flags.file({ + char: "c", + default: "cartesi.toml", + summary: "path to the configuration file", + }), + "drives-only": Flags.boolean({ + default: false, + summary: "only build drives", + }), + }; - const snapshotPath = this.getContextPath("image"); - const tarPath = this.getContextPath("image.tar"); - const gnuTarPath = this.getContextPath("image.gnutar"); - const ext2Path = this.getContextPath("image.ext2"); + public async run(): Promise { + const { flags } = await this.parse(Build); // clean up temp files we create along the process tmp.setGracefulCleanup(); - // use pre-existing image or build dapp image - const appImage = flags["from-image"] || (await this.buildImage(flags)); - - // prepare context directory - await fs.emptyDir(this.getContextPath()); // XXX: make it less error prone + // get application configuration from 'cartesi.toml' + const config = this.getApplicationConfig(flags.config); - // get and validate image info - const imageInfo = await this.getImageInfo(appImage); + // destination directory for image and intermediate files + const destination = path.resolve(this.getContextPath()); - // resolve sdk version - const sdkImage = `${imageInfo.sdkName}:${imageInfo.sdkVersion}`; + // prepare context directory + await fs.emptyDir(destination); // XXX: make it less error prone + + // start build of all drives simultaneously + const results = Object.entries(config.drives).reduce< + Record> + >((acc, [name, drive]) => { + acc[name] = buildDrive(name, drive, config.sdk, destination); + return acc; + }, {}); + + // await for all drives to be built + await Promise.all(Object.values(results)); + + if (flags["drives-only"]) { + // only build drives, so quit here + return; + } - try { - // create docker tarball for image specified - await this.createTarball(appImage, tarPath); + // get image info of root drive + const root = await results["root"]; + const imageInfo = root || undefined; - // create rootfs tar - await this.sdkRun( - sdkImage, - BuildApplication.createRootfsTarCommand(), - tarPath, - gnuTarPath, - ); + // path of machine snapshot + const snapshotPath = this.getContextPath("image"); - // create ext2 - await this.sdkRun( - sdkImage, - BuildApplication.createExt2Command( - bytes.parse(imageInfo.dataSize), - ), - gnuTarPath, - ext2Path, - ); + // create machine snapshot + await bootMachine(config, imageInfo, config.sdk, destination); - // create machine snapshot - await this.sdkRun( - sdkImage, - BuildApplication.createMachineSnapshotCommand(imageInfo), - ext2Path, - snapshotPath, - ); - await fs.chmod(snapshotPath, 0o755); - } finally { - await fs.remove(gnuTarPath); - await fs.remove(tarPath); - } + await fs.chmod(snapshotPath, 0o755); } } diff --git a/apps/cli/src/commands/shell.ts b/apps/cli/src/commands/shell.ts index 3c8a928b..b0423300 100644 --- a/apps/cli/src/commands/shell.ts +++ b/apps/cli/src/commands/shell.ts @@ -66,7 +66,7 @@ export default class Shell extends BaseCommand { const { flags } = await this.parse(Shell); // use pre-existing image or build dapp image - const ext2Path = this.getContextPath("image.ext2"); + const ext2Path = 
this.getContextPath("root.ext2"); if (!fs.existsSync(ext2Path)) { throw new Error( `machine not built, run '${this.config.bin} build'`, diff --git a/apps/cli/src/config.ts b/apps/cli/src/config.ts new file mode 100644 index 00000000..7e5e4393 --- /dev/null +++ b/apps/cli/src/config.ts @@ -0,0 +1,409 @@ +import bytes from "bytes"; +import os from "os"; +import { extname } from "path"; +import { TomlPrimitive, parse as parseToml } from "smol-toml"; + +/** + * Configuration for drives of a Cartesi Machine. A drive may already exist or be built by a builder + */ +const DEFAULT_FORMAT = "ext2"; +const DEFAULT_RAM = "128Mi"; +const DEFAULT_RAM_IMAGE_DOCKER = "/usr/share/cartesi-machine/images/linux.bin"; +const DEFAULT_RAM_IMAGE_LINUX = "/usr/share/cartesi-machine/images/linux.bin"; +const DEFAULT_RAM_IMAGE_MAC = + "/opt/homebrew/share/cartesi-machine/images/linux.bin"; +const DEFAULT_SDK = "cartesi/sdk:0.10.0"; + +type Builder = "directory" | "docker" | "empty" | "none" | "tar"; +type DriveFormat = "ext2" | "sqfs"; + +export type DirectoryDriveConfig = { + builder: "directory"; + extraSize: number; // default is 0 (no extra size) + format: DriveFormat; + directory: string; // required +}; + +export type DockerDriveConfig = { + builder: "docker"; + dockerfile: string; + extraSize: number; // default is 0 (no extra size) + format: DriveFormat; + image?: string; // default is to build an image from a Dockerfile + tags: string[]; // default is empty array + target?: string; // default is last stage of multi-stage +}; + +export type EmptyDriveConfig = { + builder: "empty"; + format: "ext2" | "raw"; + size: number; // in bytes +}; + +export type ExistingDriveConfig = { + builder: "none"; + filename: string; // required + format: DriveFormat; +}; + +export type TarDriveConfig = { + builder: "tar"; + filename: string; // required + format: DriveFormat; + extraSize: number; // default is 0 (no extra size) +}; + +export type DriveConfig = ( + | DirectoryDriveConfig + | DockerDriveConfig + | EmptyDriveConfig + | ExistingDriveConfig + | TarDriveConfig +) & { + mount?: string | boolean; // default given by cartesi-machine + shared?: boolean; // default given by cartesi-machine + user?: string; // default given by cartesi-machine +}; + +export type MachineConfig = { + assertRollingTemplate?: boolean; // default given by cartesi-machine + bootargs: string[]; + entrypoint?: string; + maxMCycle?: bigint; // default given by cartesi-machine + noRollup?: boolean; // default given by cartesi-machine + ramLength: string; + ramImage: string; +}; + +export type Config = { + drives: Record; + machine: MachineConfig; + sdk: string; +}; + +type TomlTable = { [key: string]: TomlPrimitive }; + +export const defaultRootDriveConfig = (): DriveConfig => ({ + builder: "docker", + dockerfile: "Dockerfile", // file on current working directory + extraSize: 0, + format: DEFAULT_FORMAT, + tags: [], +}); + +export const defaultRamImage = (): string => { + switch (os.platform()) { + case "darwin": + return DEFAULT_RAM_IMAGE_MAC; + default: + return DEFAULT_RAM_IMAGE_LINUX; + } +}; + +export const defaultMachineConfig = (): MachineConfig => ({ + assertRollingTemplate: undefined, + bootargs: [], + entrypoint: undefined, + maxMCycle: undefined, + noRollup: undefined, + ramLength: DEFAULT_RAM, + ramImage: defaultRamImage(), +}); + +export const defaultConfig = (): Config => ({ + drives: { root: defaultRootDriveConfig() }, + machine: defaultMachineConfig(), + sdk: DEFAULT_SDK, +}); + +const parseBoolean = (value: TomlPrimitive, 
defaultValue: boolean): boolean => { + if (value === undefined) { + return defaultValue; + } else if (typeof value === "boolean") { + return value; + } + throw new Error(`Invalid boolean value: ${value}`); +}; + +const parseOptionalBoolean = (value: TomlPrimitive): boolean | undefined => { + if (value === undefined) { + return undefined; + } else if (typeof value === "boolean") { + return value; + } + throw new Error(`Invalid boolean value: ${value}`); +}; + +const parseString = (value: TomlPrimitive, defaultValue: string): string => { + if (value === undefined) { + return defaultValue; + } else if (typeof value === "string") { + return value; + } + throw new Error(`Invalid string value: ${value}`); +}; + +const parseStringArray = (value: TomlPrimitive): string[] => { + if (value === undefined) { + return []; + } else if (typeof value === "string") { + return [value]; + } else if (typeof value === "object" && Array.isArray(value)) { + return value.map((v) => { + if (typeof v === "string") { + return v; + } + throw new Error(`Invalid string value: ${v}`); + }); + } + throw new Error(`Invalid string array value: ${value}`); +}; + +const parseRequiredString = (value: TomlPrimitive, key: string): string => { + if (value === undefined) { + throw new Error(`Missing required value: ${key}`); + } else if (typeof value === "string") { + return value; + } + throw new Error(`Invalid string value: ${value}`); +}; + +const parseOptionalString = (value: TomlPrimitive): string | undefined => { + if (value === undefined) { + return undefined; + } else if (typeof value === "string") { + return value; + } + throw new Error(`Invalid string value: ${value}`); +}; + +const parseOptionalStringBoolean = ( + value: TomlPrimitive, +): string | boolean | undefined => { + if (value === undefined) { + return undefined; + } else if (typeof value === "string") { + return value; + } else if (typeof value === "boolean") { + return value; + } + throw new Error(`Invalid string value: ${value}`); +}; + +const parseOptionalNumber = (value: TomlPrimitive): bigint | undefined => { + if (value === undefined) { + return undefined; + } else if (typeof value === "bigint") { + return value; + } else if (typeof value === "number") { + return BigInt(value); + } + throw new Error(`Invalid number value: ${value}`); +}; + +const parseBytes = (value: TomlPrimitive, defaultValue: number): number => { + if (value === undefined) { + return defaultValue; + } else if (typeof value === "bigint") { + return Number(value); + } else if (typeof value === "number" || typeof value === "string") { + return bytes.parse(value); + } + throw new Error(`Invalid bytes value: ${value}`); +}; + +const parseBuilder = (value: TomlPrimitive): Builder => { + if (value === undefined) { + return "docker"; + } else if (typeof value === "string") { + switch (value) { + case "directory": + return "directory"; + case "docker": + return "docker"; + case "empty": + return "empty"; + case "none": + return "none"; + case "tar": + return "tar"; + } + } + throw new Error(`Invalid builder: ${value}`); +}; + +const parseFormat = (value: TomlPrimitive): DriveFormat => { + if (value === undefined) { + return DEFAULT_FORMAT; + } else if (typeof value === "string") { + switch (value) { + case "ext2": + return "ext2"; + case "sqfs": + return "sqfs"; + } + } + throw new Error(`Invalid format: ${value}`); +}; + +const parseEmptyFormat = (value: TomlPrimitive): "ext2" | "raw" => { + if (value === undefined) { + return DEFAULT_FORMAT; + } else if (typeof value === "string") { + switch 
(value) { + case "ext2": + return "ext2"; + case "raw": + return "raw"; + } + } + throw new Error(`Invalid format: ${value}`); +}; + +const parseMachine = (value: TomlPrimitive): MachineConfig => { + if (value === undefined) { + // default machine + return defaultMachineConfig(); + } + if (typeof value !== "object") { + throw new Error(`Invalid machine configuration: ${value}`); + } + const toml = value as TomlTable; + + return { + assertRollingTemplate: parseOptionalBoolean( + toml["assert-rolling-template"], + ), + bootargs: parseStringArray(toml.bootargs), + maxMCycle: parseOptionalNumber(toml["max-mcycle"]), + noRollup: parseBoolean(toml["no-rollup"], false), + ramLength: parseString(toml["ram-length"], DEFAULT_RAM), + ramImage: parseString(toml["ram-image"], defaultRamImage()), + }; +}; + +export const getDriveFormat = (filename: string): DriveFormat => { + const extension = extname(filename); + switch (extension) { + case ".ext2": + return "ext2"; + case ".sqfs": + return "sqfs"; + default: + throw new Error(`Invalid drive format: ${extension}`); + } +}; + +const parseDrive = (drive: TomlPrimitive): DriveConfig => { + const builder = parseBuilder((drive as TomlTable).builder); + switch (builder) { + case "directory": { + const { extraSize, format, mount, directory, shared, user } = + drive as TomlTable; + return { + builder: "directory", + extraSize: parseBytes(extraSize, 0), + format: parseFormat(format), + mount: parseOptionalStringBoolean(mount), + directory: parseRequiredString(directory, "directory"), + shared: parseOptionalBoolean(shared), + user: parseOptionalString(user), + }; + } + case "docker": { + const { + dockerfile, + extraSize, + format, + image, + mount, + shared, + tags, + target, + user, + } = drive as TomlTable; + return { + builder: "docker", + image: parseOptionalString(image), + dockerfile: parseString(dockerfile, "Dockerfile"), + extraSize: parseBytes(extraSize, 0), + format: parseFormat(format), + mount: parseOptionalStringBoolean(mount), + shared: parseOptionalBoolean(shared), + user: parseOptionalString(user), + tags: parseStringArray(tags), + target: parseOptionalString(target), + }; + } + case "empty": { + const { format, mount, size, shared, user } = drive as TomlTable; + return { + builder: "empty", + format: parseEmptyFormat(format), + mount: parseOptionalStringBoolean(mount), + shared: parseOptionalBoolean(shared), + size: parseBytes(size, 0), + user: parseOptionalString(user), + }; + } + case "tar": { + const { extraSize, filename, format, mount, shared, user } = + drive as TomlTable; + return { + builder: "tar", + extraSize: parseBytes(extraSize, 0), + filename: parseRequiredString(filename, "filename"), + format: parseFormat(format), + mount: parseOptionalStringBoolean(mount), + shared: parseOptionalBoolean(shared), + user: parseOptionalString(user), + }; + } + case "none": { + const { shared, mount, user } = drive as TomlTable; + const filename = parseRequiredString( + (drive as TomlTable).filename, + "filename", + ); + const format = getDriveFormat(filename); + return { + builder: "none", + filename, + format, + mount: parseOptionalStringBoolean(mount), + shared: parseOptionalBoolean(shared), + user: parseOptionalString(user), + }; + } + } +}; + +const parseDrives = (config: TomlPrimitive): Record => { + // load drives from configuration + const drives = Object.entries((config as TomlTable) ?? 
{}).reduce< + Record + >((acc, [name, drive]) => { + acc[name] = parseDrive(drive); + return acc; + }, {}); + + // check if there is a root drive + const hasRoot = drives.root !== undefined; + if (!hasRoot) { + // there is no root drive, add a default one + drives.root = defaultRootDriveConfig(); + } + return drives; +}; + +export const parse = (str: string): Config => { + const toml = parseToml(str); + + const config: Config = { + drives: parseDrives(toml.drives), + machine: parseMachine(toml.machine), + sdk: parseString(toml.sdk, DEFAULT_SDK), + }; + + return config; +}; diff --git a/apps/cli/src/exec.ts b/apps/cli/src/exec.ts new file mode 100644 index 00000000..8847d43d --- /dev/null +++ b/apps/cli/src/exec.ts @@ -0,0 +1,100 @@ +import { spawnSync, SpawnSyncOptions } from "child_process"; +import { execa, ExecaError, Options } from "execa"; +import os from "os"; + +/** + * Calls execa and falls back to docker run if command (on the host) fails + * @param command command to be executed + * @param args arguments to be passed to the command + * @param options execution options + * @returns return of execa + */ +export type ExecaOptionsDockerFallback = Options & { image?: string }; +export const execaDockerFallback = async ( + command: string, + args: readonly string[], + options: ExecaOptionsDockerFallback, +) => { + try { + return await execa(command, args, options); + } catch (error) { + if (error instanceof ExecaError) { + if (error.code === "ENOENT" && options.image) { + console.warn( + `error executing '${command}', falling back to docker execution using image '${options.image}'`, + ); + const userInfo = os.userInfo(); + const dockerOpts = [ + "--volume", + `${options.cwd}:/work`, + "--workdir", + "/work", + "--user", + `${userInfo.uid}:${userInfo.gid}`, + ]; + return await execa( + "docker", + ["run", ...dockerOpts, options.image, command, ...args], + options, + ); + } else { + console.error(`error executing '${command}'`, error); + } + } + throw error; + } +}; + +/** + * Calls spawnSync and falls back to docker run if command (on the host) fails + * @param command command to be executed + * @param args arguments to be passed to the command + * @param options execution options + * @returns return of execa + */ +export type SpawnOptionsDockerFallback = SpawnSyncOptions & { image?: string }; +export const spawnSyncDockerFallback = async ( + command: string, + args: readonly string[], + options: SpawnOptionsDockerFallback, +) => { + const result = spawnSync(command, args, options); + if (result.error) { + const code = (result.error as any).code; + if (code === "ENOENT" && options.image) { + console.warn( + `error executing '${command}', falling back to docker execution using image '${options.image}'`, + ); + const userInfo = os.userInfo(); + const dockerOpts = [ + "--volume", + `${options.cwd}:/work`, + "--workdir", + "/work", + "--interactive", + "--user", + `${userInfo.uid}:${userInfo.gid}`, + ]; + const dockerArgs = [ + "run", + ...dockerOpts, + options.image, + command, + ...args, + ]; + const dockerResult = spawnSync("docker", dockerArgs, options); + if (dockerResult.error) { + console.error( + `error executing '${command}'`, + dockerResult.error, + ); + throw dockerResult.error; + } + return dockerResult; + } else { + console.error(`error executing '${command}'`, result.error); + throw result.error; + } + } + return result; +}; diff --git a/apps/cli/test/config.test.ts b/apps/cli/test/config.test.ts new file mode 100644 index 00000000..15efc305 --- /dev/null +++ 
b/apps/cli/test/config.test.ts @@ -0,0 +1,86 @@ +import { describe, expect, it } from "vitest"; +import { defaultConfig, defaultMachineConfig, parse } from "../src/config.js"; + +describe("config", () => { + it("default config", () => { + const config = parse(""); + expect(config).toEqual(defaultConfig()); + }); + + it("non-standard root drive", () => { + const config = parse(`[drives.root] +builder = "docker" +dockerfile = "backend/Dockerfile" +shared = true`); + + expect(config).toEqual({ + ...defaultConfig(), + drives: { + root: { + builder: "docker", + dockerfile: "backend/Dockerfile", + extraSize: 0, + format: "ext2", + image: undefined, + mount: undefined, + tags: [], + target: undefined, + shared: true, + user: undefined, + }, + }, + }); + }); + + it("invalid drive", () => { + expect(parse("drives = 42")).toEqual(defaultConfig()); + expect(parse("drives.root = true")).toEqual(defaultConfig()); + expect(parse("drives.root = 42")).toEqual(defaultConfig()); + }); + + it("invalid drive: invalid builder", () => { + expect(() => parse('[drives.root]\nbuilder = "invalid"')).toThrowError( + "Invalid builder: invalid", + ); + expect(() => parse("[drives.root]\nbuilder = true")).toThrowError( + "Invalid builder: true", + ); + expect(() => parse("[drives.root]\nbuilder = 10")).toThrowError( + "Invalid builder: 10", + ); + expect(() => parse("[drives.root]\nbuilder = {}")).toThrowError( + "Invalid builder: [object Object]", + ); + }); + + it("invalid drive: invalid format", () => { + expect(() => parse('[drives.root]\nformat = "invalid"')).toThrowError( + "Invalid format: invalid", + ); + expect(() => parse("[drives.root]\nformat = true")).toThrowError( + "Invalid format: true", + ); + expect(() => parse("[drives.root]\nformat = 10")).toThrowError( + "Invalid format: 10", + ); + expect(() => parse("[drives.root]\nformat = {}")).toThrowError( + "Invalid format: [object Object]", + ); + }); + + it("invalid drive: invalid mount", () => { + expect(() => parse("[drives.data]\nmount = 42")).toThrowError( + "Invalid string value: 42", + ); + }); + + it("machine-config", () => { + expect(parse("[machine]\nno-rollup = true")).toEqual({ + ...defaultConfig(), + machine: { + ...defaultMachineConfig(), + noRollup: true, + }, + }); + }); +}); diff --git a/apps/cli/test/configs/default.toml b/apps/cli/test/configs/default.toml new file mode 100644 index 00000000..0f570539 --- /dev/null +++ b/apps/cli/test/configs/default.toml @@ -0,0 +1,2 @@ +# a default configuration is an empty one +# meaning a Cartesi project does not require a cartesi.toml config file diff --git a/apps/cli/test/configs/drives/basic.toml b/apps/cli/test/configs/drives/basic.toml new file mode 100644 index 00000000..13587a50 --- /dev/null +++ b/apps/cli/test/configs/drives/basic.toml @@ -0,0 +1,7 @@ +# this is the basic configuration of a root flash drive built with Docker +# this is also the default configuration for a root flash drive + +[drives.root] +builder = "docker" +dockerfile = "Dockerfile" +format = "ext2" diff --git a/apps/cli/test/configs/drives/data.toml b/apps/cli/test/configs/drives/data.toml new file mode 100644 index 00000000..bc0ac0ec --- /dev/null +++ b/apps/cli/test/configs/drives/data.toml @@ -0,0 +1,7 @@ +# example of a drive with project files + +[drives.data] +builder = "directory" +directory = "./data" # required +extraSize = "100Mb" # optional. 
size is given by directory content size plus this amount
+mount = "/var/lib/app" # optional, default is /mnt/{name}
diff --git a/apps/cli/test/configs/drives/empty.toml b/apps/cli/test/configs/drives/empty.toml
new file mode 100644
index 00000000..7564982d
--- /dev/null
+++ b/apps/cli/test/configs/drives/empty.toml
@@ -0,0 +1,7 @@
+# example of an empty drive to hold application data
+
+[drives.data]
+builder = "empty"
+size = "100Mb" # size can be given as string, or as a number in bytes
+mount = "/var/lib/app" # default is /mnt/{name}
+# format is always ext2, as sqfs is read-only, and a read-only empty drive does not really make sense
diff --git a/apps/cli/test/configs/drives/none.toml b/apps/cli/test/configs/drives/none.toml
new file mode 100644
index 00000000..aa4c1d4d
--- /dev/null
+++ b/apps/cli/test/configs/drives/none.toml
@@ -0,0 +1,6 @@
+# example of a drive that is ready
+# this is useful in case the drive was previously built by another process
+
+[drives.root]
+builder = "none"
+filename = "./rootfs-tools-v0.15.0.ext2"
diff --git a/apps/cli/test/configs/drives/rives.toml b/apps/cli/test/configs/drives/rives.toml
new file mode 100644
index 00000000..83e20898
--- /dev/null
+++ b/apps/cli/test/configs/drives/rives.toml
@@ -0,0 +1,15 @@
+# example inspired by Rives. Games are added as sqfs files, which come from an existing external build process
+[drives.root]
+builder = "docker"
+dockerfile = "Dockerfile"
+format = "ext2"
+
+[drives.doom]
+builder = "none"
+filename = "./games/doom.sqfs"
+mount = "/usr/local/games/doom"
+
+[drives.tetrix]
+builder = "none"
+filename = "./games/tetrix.sqfs"
+mount = "/usr/local/games/tetrix"
diff --git a/apps/cli/test/configs/drives/tar.toml b/apps/cli/test/configs/drives/tar.toml
new file mode 100644
index 00000000..9cd8b95d
--- /dev/null
+++ b/apps/cli/test/configs/drives/tar.toml
@@ -0,0 +1,6 @@
+# example of a drive built with contents of a tar file
+# this is useful if the developer wants to take care of the tar procedure
+
+[drives.data]
+builder = "tar"
+filename = "build/files.tar"
diff --git a/apps/cli/test/configs/full.toml b/apps/cli/test/configs/full.toml
new file mode 100644
index 00000000..9c60c3df
--- /dev/null
+++ b/apps/cli/test/configs/full.toml
@@ -0,0 +1,45 @@
+# sdk = "cartesi/sdk:0.6.0"
+# runtime = "rollups"
+# runtime = "lambada"
+
+# [machine]
+# assert-rolling-template = true
+# bootargs = ["no4lvl", "quiet", "earlycon=sbi", "console=hvc0", "rootfstype=ext2", "root=/dev/pmem0", "rw", "init=/usr/sbin/cartesi-init"]
+# entrypoint = "/usr/local/bin/app"
+# max-mcycle = 0
+# no-rollup = false
+# ram-image = "/usr/share/cartesi-machine/images/linux.bin" # directory inside SDK image
+# ram-length = "128Mi"
+
+# [drives.root]
+# builder = "docker"
+# dockerfile = "Dockerfile"
+# target = "docker-multi-stage-target"
+# format = "ext2"
+# format = "sqfs"
+# extraSize = "100Mb" # optional. size is given by directory content size plus this amount
+
+# [drives.data]
+# builder = "empty"
+# size = "100Mb" # size can be given as string, or as a number in bytes
+# mount = "/var/lib/app" # default is /mnt/{name}
+
+# [drives.data]
+# builder = "directory"
+# directory = "./data" # required
+# extraSize = "100Mb" # optional. size is given by directory content size plus this amount
+# format = "ext2"
+# format = "sqfs"
+# mount = "/var/lib/app" # optional, default is /mnt/{name}
+
+# [drives.data]
+# builder = "tar"
+# filename = "build/files.tar"
+# extraSize = "100Mb" # optional.
size is given by directory content size plus this amount +# mount = "/var/lib/app" # optional, default is /mnt/{name} + +# [drives.doom] +# builder = "none" +# filename = "./games/doom.sqfs" +# mount = "/usr/local/games/doom" + diff --git a/apps/cli/test/configs/machine/bootargs.toml b/apps/cli/test/configs/machine/bootargs.toml new file mode 100644 index 00000000..e69de29b diff --git a/apps/cli/test/configs/machine/no_boot.toml b/apps/cli/test/configs/machine/no_boot.toml new file mode 100644 index 00000000..909af54c --- /dev/null +++ b/apps/cli/test/configs/machine/no_boot.toml @@ -0,0 +1,5 @@ +# example of a machine that doesn't run until yield + +[machine] +assert_rolling_update = false +max-mcycle = 0 diff --git a/apps/cli/test/tsconfig.json b/apps/cli/test/tsconfig.json deleted file mode 100644 index 342af470..00000000 --- a/apps/cli/test/tsconfig.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "extends": "../tsconfig", - "compilerOptions": { - "noEmit": true - } -} diff --git a/apps/cli/tsconfig.build.json b/apps/cli/tsconfig.build.json new file mode 100644 index 00000000..d5567cc5 --- /dev/null +++ b/apps/cli/tsconfig.build.json @@ -0,0 +1,7 @@ +{ + "extends": "./tsconfig.json", + "include": ["src/**/*.ts"], + "compilerOptions": { + "rootDir": "src" + } +} diff --git a/apps/cli/tsconfig.json b/apps/cli/tsconfig.json index 1dfaaee1..a1322dbe 100644 --- a/apps/cli/tsconfig.json +++ b/apps/cli/tsconfig.json @@ -1,12 +1,11 @@ { "extends": "tsconfig/base.json", - "include": ["src/**/*.ts"], + "include": ["**/*.ts"], "exclude": ["node_modules"], "compilerOptions": { "module": "ES2020", "importHelpers": true, "outDir": "dist", - "rootDir": "src", "target": "es2020" }, "ts-node": { diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 4768233e..4102eb4a 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -80,6 +80,9 @@ importers: semver: specifier: ^7.6.3 version: 7.6.3 + smol-toml: + specifier: ^1.3.0 + version: 1.3.0 tmp: specifier: ^0.2.3 version: 0.2.3 @@ -5719,6 +5722,10 @@ packages: resolution: {integrity: sha512-3dOsAHXXUkQTpOYcoAxLIorMTp4gIQr5IW3iVb7A7lFIp0VHhnynm9izx6TssdrIcVIESAlVjtnO2K8bg+Coew==} engines: {node: '>=12'} + smol-toml@1.3.0: + resolution: {integrity: sha512-tWpi2TsODPScmi48b/OQZGi2lgUmBCHy6SZrhi/FdnnHiU1GwebbCfuQuxsC3nHaLwtYeJGPrDZDIeodDOc4pA==} + engines: {node: '>= 18'} + snake-case@3.0.4: resolution: {integrity: sha512-LAOh4z89bGQvl9pFfNF8V146i7o7/CqFPbqzYgP+yYzDIDeS9HaNFtXABamRW+AQzEVODcvE79ljJ+8a9YSdMg==} @@ -13580,6 +13587,8 @@ snapshots: slash@4.0.0: {} + smol-toml@1.3.0: {} + snake-case@3.0.4: dependencies: dot-case: 3.0.4