From 8b8c30f18e604a121578a39e72abece2bbb8f36e Mon Sep 17 00:00:00 2001 From: Marius Andra Date: Sun, 9 Mar 2025 16:01:40 +0100 Subject: [PATCH 1/4] cross compilation early version --- Dockerfile | 8 ++++++++ backend/app/tasks/deploy_frame.py | 24 +++++++++++++++++++++--- backend/app/utils/ssh_utils.py | 8 +++++--- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index 34151a77..8bbe2aac 100644 --- a/Dockerfile +++ b/Dockerfile @@ -30,6 +30,14 @@ ENV PATH="/opt/nim/bin:${PATH}" RUN nim --version \ nimble --version +# Add ARM64 architecture and install cross compiler and cross libraries +RUN dpkg --add-architecture arm64 \ + && apt-get update \ + && apt-get install -y \ + crossbuild-essential-arm64 \ + libc6-dev:arm64 \ + && rm -rf /var/lib/apt/lists/* + # Copy the requirements file and install using pip WORKDIR /app/backend COPY backend/requirements.txt . diff --git a/backend/app/tasks/deploy_frame.py b/backend/app/tasks/deploy_frame.py index 599d0f7d..0105f817 100644 --- a/backend/app/tasks/deploy_frame.py +++ b/backend/app/tasks/deploy_frame.py @@ -106,7 +106,26 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: db, redis, frame, build_dir, build_id, nim_path, source_dir, temp_dir, cpu ) - if low_memory: + cross_compiling = False + if cpu == "arm64": + response_code, _, _ = await exec_local_command(db, redis, frame, "aarch64-linux-gnu-gcc --version", generate_log=False) + if response_code == 0: + cross_compiling = True + await log(db, redis, id, "stdout", "- Cross compiling for ARM64") + + await exec_local_command( + db, redis, frame, + f"cd {build_dir} && " + f"make clean && " + f"make CC=aarch64-linux-gnu-gcc -j$(nproc)" + ) + else: + await log(db, redis, id, "stdout", f"- Cross compilation not available for {cpu}: aarch64-linux-gnu-gcc not installed.") + else: + await log(db, redis, id, "stdout", f"- Cross compilation not available for {cpu}. Compiling on device") + + + if low_memory and not cross_compiling: await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation") await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False) @@ -469,8 +488,7 @@ async def create_local_build_archive( shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "env"), ignore_errors=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "__pycache__"), ignore_errors=True) - await log(db, redis, int(frame.id), "stdout", - "- No cross compilation. 
Generating source code for compilation on frame.") + await log(db, redis, int(frame.id), "stdout", "- Generating source code for compilation.") debug_options = "--lineTrace:on" if frame.debug else "" cmd = ( diff --git a/backend/app/utils/ssh_utils.py b/backend/app/utils/ssh_utils.py index 5280e72a..d9226244 100644 --- a/backend/app/utils/ssh_utils.py +++ b/backend/app/utils/ssh_utils.py @@ -319,7 +319,8 @@ async def exec_local_command(db: Session, redis: ArqRedis, frame: Frame, command output = process.stdout.readline() if not output: break - await log(db, redis, int(frame.id), "stdout", output) + if generate_log: + await log(db, redis, int(frame.id), "stdout", output) outputs.append(output) if process.stderr: @@ -327,7 +328,8 @@ async def exec_local_command(db: Session, redis: ArqRedis, frame: Frame, command error = process.stderr.readline() if not error: break - await log(db, redis, int(frame.id), "stderr", error) + if generate_log: + await log(db, redis, int(frame.id), "stderr", error) errors.append(error) if break_next: @@ -337,7 +339,7 @@ async def exec_local_command(db: Session, redis: ArqRedis, frame: Frame, command await asyncio.sleep(0.1) exit_status = process.returncode - if exit_status != 0: + if exit_status != 0 and generate_log: await log(db, redis, int(frame.id), "exit_status", f"The command exited with status {exit_status}") return (exit_status, From a313be9e8fbec1d5d3dd2a756e93a40e4de9a66f Mon Sep 17 00:00:00 2001 From: Marius Andra Date: Tue, 11 Mar 2025 23:55:55 +0100 Subject: [PATCH 2/4] compile nimc locally --- backend/app/tasks/deploy_frame.py | 385 ++++++++++++++++++++---------- frameos/tools/nimc.Makefile | 2 +- 2 files changed, 258 insertions(+), 129 deletions(-) diff --git a/backend/app/tasks/deploy_frame.py b/backend/app/tasks/deploy_frame.py index 0105f817..042b86f3 100644 --- a/backend/app/tasks/deploy_frame.py +++ b/backend/app/tasks/deploy_frame.py @@ -30,10 +30,15 @@ async def deploy_frame(id: int, redis: Redis): + """Queue a job to deploy a frame by ID.""" await redis.enqueue_job("deploy_frame", id=id) async def deploy_frame_task(ctx: dict[str, Any], id: int): + """ + Main deployment logic for building, packaging, and deploying + the Nim (FrameOS) application onto a target device via SSH. + """ db: Session = ctx['db'] redis: Redis = ctx['redis'] @@ -49,11 +54,10 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): if frame.status == 'deploying': raise Exception("Already deploying. Request again to force redeploy.") - frame_dict = frame.to_dict() # persisted as frame.last_successful_deploy if successful - if "last_successful_deploy" in frame_dict: - del frame_dict["last_successful_deploy"] - if "last_successful_deploy_at" in frame_dict: - del frame_dict["last_successful_deploy_at"] + # We do not want to persist these fields if successful. 
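+    # (dict.pop(key, None) removes the key when present and is a no-op
+    # otherwise, so no KeyError is raised for frames that never deployed.)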
+ frame_dict = frame.to_dict() + frame_dict.pop("last_successful_deploy", None) + frame_dict.pop("last_successful_deploy_at", None) frame.status = 'deploying' await update_frame(db, redis, frame) @@ -65,77 +69,91 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): ssh = await get_ssh_connection(db, redis, frame) async def install_if_necessary(pkg: str, raise_on_error=True) -> int: - return await exec_command( - db, redis, frame, ssh, - f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}", - raise_on_error=raise_on_error - ) - - with tempfile.TemporaryDirectory() as temp_dir: - await log(db, redis, id, "stdout", "- Getting target architecture") - uname_output: list[str] = [] - await exec_command(db, redis, frame, ssh, "uname -m", uname_output) - arch = "".join(uname_output).strip() - if arch in ("aarch64", "arm64"): - cpu = "arm64" - elif arch in ("armv6l", "armv7l"): - cpu = "arm" - elif arch == "i386": - cpu = "i386" - else: - cpu = "amd64" - - total_memory = 0 - try: - mem_output: list[str] = [] - await exec_command(db, redis, frame, ssh, "free -m", mem_output) - total_memory = int(mem_output[1].split()[1]) # line 1 => "Mem: ... 991 ..." - except Exception as e: - await log(db, redis, id, "stderr", str(e)) - low_memory = total_memory < 512 + """ + Installs package `pkg` on the remote device if it's not already installed. + """ + cmd = f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}" + return await exec_command(db, redis, frame, ssh, cmd, raise_on_error=raise_on_error) + + # 1. Detect target architecture on the remote device. + await log(db, redis, id, "stdout", "- Getting target architecture") + uname_output: list[str] = [] + await exec_command(db, redis, frame, ssh, "uname -m", uname_output) + arch = "".join(uname_output).strip() + + # Simplify arch -> "arm64", "arm", "amd64", "i386" ... + cpu = get_target_cpu(arch) + + # Check total memory (for Pi Zero, etc.) so we can stop the service during local (on-device) compile + total_memory = 0 + try: + mem_output: list[str] = [] + await exec_command(db, redis, frame, ssh, "free -m", mem_output) + # mem_output[0]: " total used free shared buff/cache available" + # mem_output[1]: "Mem: 991 223 ... etc." + # We'll parse line 1 + total_memory = int(mem_output[1].split()[1]) + except Exception as e: + await log(db, redis, id, "stderr", str(e)) + low_memory = total_memory < 512 - drivers = drivers_for_frame(frame) + drivers = drivers_for_frame(frame) - # 1. Create build tar.gz locally + # 2. 
Build or cross-compile locally, then package up the result + with tempfile.TemporaryDirectory() as temp_dir: await log(db, redis, id, "stdout", "- Copying build folders") build_dir, source_dir = create_build_folders(temp_dir, build_id) + await log(db, redis, id, "stdout", "- Applying local modifications") await make_local_modifications(db, redis, frame, source_dir) + await log(db, redis, id, "stdout", "- Creating build archive") - archive_path = await create_local_build_archive( - db, redis, frame, build_dir, build_id, nim_path, source_dir, temp_dir, cpu - ) - cross_compiling = False - if cpu == "arm64": - response_code, _, _ = await exec_local_command(db, redis, frame, "aarch64-linux-gnu-gcc --version", generate_log=False) + # Decide if we can cross-compile for the target + do_cross_compile = False + cross_compiler = get_cross_compiler_for_cpu(cpu) + if cross_compiler: + response_code, _, _ = await exec_local_command(db, redis, frame, f"{cross_compiler} --version", generate_log=False) if response_code == 0: - cross_compiling = True - await log(db, redis, id, "stdout", "- Cross compiling for ARM64") - - await exec_local_command( - db, redis, frame, - f"cd {build_dir} && " - f"make clean && " - f"make CC=aarch64-linux-gnu-gcc -j$(nproc)" - ) - else: - await log(db, redis, id, "stdout", f"- Cross compilation not available for {cpu}: aarch64-linux-gnu-gcc not installed.") - else: - await log(db, redis, id, "stdout", f"- Cross compilation not available for {cpu}. Compiling on device") + do_cross_compile = True + # 2A. Generate Nim -> C code. This always happens locally (we need the C code). + archive_path = await create_local_build_archive( + db, redis, frame, + build_dir, build_id, nim_path, source_dir, temp_dir, cpu + ) - if low_memory and not cross_compiling: + # 2B. If cross-compiling is possible, actually produce final `frameos` locally. + if do_cross_compile: + await log(db, redis, id, "stdout", f"- Cross compiling for {cpu} using {cross_compiler}") + cmd = f"cd {build_dir} && make clean && make CC={cross_compiler} -j$(nproc)" + status, out, err = await exec_local_command(db, redis, frame, cmd) + if status != 0: + raise Exception("Cross-compilation failed. See logs for details.") + + # Re-create the tar AFTER the final binary is present + # We remove the old one, then tar again so it has `frameos` in it. + os.remove(archive_path) + archive_path = os.path.join(temp_dir, f"build_{build_id}.tar.gz") + zip_base = os.path.join(temp_dir, f"build_{build_id}") + shutil.make_archive(zip_base, 'gztar', temp_dir, f"build_{build_id}") + + # 3. On low-memory devices, stop FrameOS if we must compile there + # i.e. we do NOT have cross_compiled. + if not do_cross_compile and low_memory: await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation") await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False) - # 2. Remote steps - await install_if_necessary("ntp") - await install_if_necessary("build-essential") + # 4. Install build dependencies on device (only if we plan to compile on device) + if not do_cross_compile: + await install_if_necessary("build-essential") + + await install_if_necessary("ntp") # Keep time in sync if drivers.get("evdev"): await install_if_necessary("libevdev-dev") + # 4A. Install liblgpio if needed (for waveshare or gpioButton). 
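+    #     Fallback order: probe for lgpio.h in the usual include paths, then
+    #     try apt (liblgpio-dev), and only then build liblgpio from source.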
if drivers.get("waveshare") or drivers.get("gpioButton"): check_lgpio = await exec_command( db, redis, frame, ssh, @@ -145,8 +163,7 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: if check_lgpio != 0: # Try installing liblgpio-dev if await install_if_necessary("liblgpio-dev", raise_on_error=False) != 0: - await log(db, redis, id, "stdout", - "--> Could not find liblgpio-dev. Installing from source.") + await log(db, redis, id, "stdout", "--> Could not find liblgpio-dev. Installing from source.") command = ( "if [ ! -f /usr/local/include/lgpio.h ]; then " " rm -rf /tmp/lgpio-install && " @@ -162,7 +179,7 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: ) await exec_command(db, redis, frame, ssh, command) - # Any app dependencies + # 5. Check for app dependencies (APT packages declared in config.json) all_deps = set() for scene in frame.scenes: try: @@ -175,22 +192,22 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: json_config = get_one_app_sources(app).get('config.json') if json_config: config = json.loads(json_config) - if node.get('type') == 'source': + elif node.get('type') == 'source': json_config = node.get('sources', {}).get('config.json') if json_config: config = json.loads(json_config) - if config: - if config.get('apt'): - for dep in config['apt']: - all_deps.add(dep) + if config and config.get('apt'): + for dep in config['apt']: + all_deps.add(dep) except Exception as e: await log(db, redis, id, "stderr", f"Error parsing node: {e}") except Exception as e: await log(db, redis, id, "stderr", f"Error parsing scene: {e}") + for dep in all_deps: await install_if_necessary(dep) - # Ensure /srv/frameos + # Ensure /srv/frameos on device await exec_command(db, redis, frame, ssh, "if [ ! -d /srv/frameos/ ]; then " " sudo mkdir -p /srv/frameos/ && sudo chown $(whoami):$(whoami) /srv/frameos/; " @@ -199,40 +216,51 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/ /srv/frameos/logs/") await log(db, redis, id, "stdout", f"> add /srv/frameos/build/build_{build_id}.tar.gz") - # 3. Upload the local tarball + # 6. Upload the local tarball to the device await asyncssh.scp( archive_path, (ssh, f"/srv/frameos/build/build_{build_id}.tar.gz"), recurse=False ) - # Unpack & compile on device - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz") - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/build/build_{build_id} && " - "PARALLEL_MEM=$(awk '/MemTotal/{printf \"%.0f\\n\", $2/1024/250}' /proc/meminfo) && " - "PARALLEL=$(($PARALLEL_MEM < $(nproc) ? $PARALLEL_MEM : $(nproc))) && " - "make -j$PARALLEL") + # 7. If we haven't cross-compiled locally, compile on the device + await exec_command( + db, redis, frame, ssh, + f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz" + ) + + if not do_cross_compile: + # device-based compile + await exec_command( + db, redis, frame, ssh, + f"cd /srv/frameos/build/build_{build_id} && " + "PARALLEL_MEM=$(awk '/MemTotal/{printf \"%.0f\\n\", $2/1024/250}' /proc/meminfo) && " + "PARALLEL=$(($PARALLEL_MEM < $(nproc) ? $PARALLEL_MEM : $(nproc))) && " + "make -j$PARALLEL" + ) + # 8. 
Move final binary into a release folder await exec_command(db, redis, frame, ssh, f"mkdir -p /srv/frameos/releases/release_{build_id}") - await exec_command(db, redis, frame, ssh, - f"cp /srv/frameos/build/build_{build_id}/frameos " - f"/srv/frameos/releases/release_{build_id}/frameos") + await exec_command( + db, redis, frame, ssh, + f"cp /srv/frameos/build/build_{build_id}/frameos /srv/frameos/releases/release_{build_id}/frameos" + ) - # 4. Upload frame.json using a TEMP FILE approach + # 9. Upload frame.json to the new release frame_json_data = (json.dumps(get_frame_json(db, frame), indent=4) + "\n").encode('utf-8') with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmpf: local_json_path = tmpf.name tmpf.write(frame_json_data) + await asyncssh.scp( - local_json_path, (ssh, f"/srv/frameos/releases/release_{build_id}/frame.json"), + local_json_path, + (ssh, f"/srv/frameos/releases/release_{build_id}/frame.json"), recurse=False ) - os.remove(local_json_path) # remove local temp file + os.remove(local_json_path) await log(db, redis, id, "stdout", f"> add /srv/frameos/releases/release_{build_id}/frame.json") - # Driver-specific vendor steps + # 10. Vendor steps for certain drivers if inkyPython := drivers.get("inkyPython"): await exec_command(db, redis, frame, ssh, f"mkdir -p /srv/frameos/vendor && " @@ -262,7 +290,7 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: "env/bin/pip3 install -r requirements.txt && " "sha256sum requirements.txt > requirements.txt.sha256sum))") - # 5. Upload frameos.service with a TEMP FILE approach + # 11. Upload and enable frameos.service with open("../frameos/frameos.service", "r") as f: service_contents = f.read().replace("%I", frame.ssh_user) service_data = service_contents.encode('utf-8') @@ -285,12 +313,12 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: await exec_command(db, redis, frame, ssh, "sudo chown root:root /etc/systemd/system/frameos.service") await exec_command(db, redis, frame, ssh, "sudo chmod 644 /etc/systemd/system/frameos.service") - # 6. Link new release + # 12. Link new release to /srv/frameos/current await exec_command(db, redis, frame, ssh, f"rm -rf /srv/frameos/current && " f"ln -s /srv/frameos/releases/release_{build_id} /srv/frameos/current") - # Figure out the difference between /srv/assets and the local assets folder + # 13. Sync assets (upload new or changed files in /assets) await sync_assets(db, redis, frame, ssh) # Clean old builds @@ -303,15 +331,16 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: "ls -dt1 release_* | grep -v \"$(basename $(readlink ../current))\" " "| tail -n +11 | xargs rm -rf") + # 14. 
Additional device config if needed boot_config = "/boot/config.txt" if await exec_command(db, redis, frame, ssh, "test -f /boot/firmware/config.txt", raise_on_error=False) == 0: boot_config = "/boot/firmware/config.txt" - # Additional device config if drivers.get("i2c"): + # Ensure i2c is enabled await exec_command(db, redis, frame, ssh, - 'grep -q "^dtparam=i2c_vc=on$" ' + boot_config + ' ' - '|| echo "dtparam=i2c_vc=on" | sudo tee -a ' + boot_config) + f'grep -q "^dtparam=i2c_vc=on$" {boot_config} ' + f'|| echo "dtparam=i2c_vc=on" | sudo tee -a {boot_config}') await exec_command(db, redis, frame, ssh, 'command -v raspi-config > /dev/null && ' 'sudo raspi-config nonint get_i2c | grep -q "1" && { ' @@ -323,6 +352,7 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: elif drivers.get("noSpi"): await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 1') + # On low memory devices, disable some apt timers if low_memory: await exec_command( db, redis, frame, ssh, @@ -331,25 +361,32 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: "sudo systemctl disable apt-daily.service apt-daily.timer apt-daily-upgrade.timer apt-daily-upgrade.service" ) + # Reboot or auto-restart logic if frame.reboot and frame.reboot.get('enabled') == 'true': cron_schedule = frame.reboot.get('crontab', '0 0 * * *') if frame.reboot.get('type') == 'raspberry': crontab = f"{cron_schedule} root /sbin/shutdown -r now" else: crontab = f"{cron_schedule} root systemctl restart frameos.service" - await exec_command(db, redis, frame, ssh, - f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot") + await exec_command(db, redis, frame, ssh, f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot") else: await exec_command(db, redis, frame, ssh, "sudo rm -f /etc/cron.d/frameos-reboot") + # Possibly append lines to the Pi boot config and require a reboot must_reboot = False if drivers.get("bootconfig"): for line in drivers["bootconfig"].lines: - if await exec_command(db, redis, frame, ssh, - f'grep -q "^{line}" ' + boot_config, raise_on_error=False) != 0: - await exec_command(db, redis, frame, ssh, command=f'echo "{line}" | sudo tee -a ' + boot_config, log_output=False) + cmd = f'grep -q "^{line}" {boot_config}' + if await exec_command(db, redis, frame, ssh, cmd, raise_on_error=False) != 0: + # not found in boot_config, so append + await exec_command( + db, redis, frame, ssh, + f'echo "{line}" | sudo tee -a {boot_config}', + log_output=False + ) must_reboot = True + # Enable & start the service await exec_command(db, redis, frame, ssh, "sudo systemctl daemon-reload") await exec_command(db, redis, frame, ssh, "sudo systemctl enable frameos.service") @@ -357,6 +394,7 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: frame.last_successful_deploy = frame_dict frame.last_successful_deploy_at = datetime.now(timezone.utc) + # Reboot if boot config changed if must_reboot: await update_frame(db, redis, frame) await log(db, redis, int(frame.id), "stdinfo", "Deployed! Rebooting device after boot config changes") @@ -376,7 +414,38 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: await remove_ssh_connection(db, redis, ssh, frame) +def get_target_cpu(arch: str) -> str: + """ + Map 'uname -m' output to something Nim expects in --cpu + and that we can match with cross compilers. 
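+    For example:
+        get_target_cpu("aarch64") -> "arm64"
+        get_target_cpu("armv7l")  -> "arm"
+        get_target_cpu("x86_64")  -> "amd64"  (fallback)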
+ """ + if arch in ("aarch64", "arm64"): + return "arm64" + elif arch in ("armv6l", "armv7l"): + return "arm" + elif arch == "i386": + return "i386" + # Fallback + return "amd64" + + +def get_cross_compiler_for_cpu(cpu: str) -> Optional[str]: + """ + Return the cross-compiler command for a given CPU, + or None if there's no well-known cross-compiler for that CPU. + """ + if cpu == "arm64": + return "aarch64-linux-gnu-gcc" + elif cpu == "arm": + return "arm-linux-gnueabihf-gcc" + return None + + def find_nim_v2(): + """ + Locate a Nim executable >= 2.0.0. + Raises an exception if not found or if version < 2.0.0. + """ nim_path = find_nim_executable() if not nim_path: raise Exception("Nim executable not found") @@ -387,6 +456,10 @@ def find_nim_v2(): def create_build_folders(temp_dir, build_id): + """ + Create local build directories to store Nim source + build artifacts. + Returns (build_dir, source_dir). + """ build_dir = os.path.join(temp_dir, f"build_{build_id}") source_dir = os.path.join(temp_dir, "frameos") os.makedirs(source_dir, exist_ok=True) @@ -397,6 +470,10 @@ def create_build_folders(temp_dir, build_id): async def make_local_modifications(db: Session, redis: Redis, frame: Frame, source_dir: str): + """ + Write out scene, app, driver code into the Nim sources + according to the current frame config. + """ shutil.rmtree(os.path.join(source_dir, "src", "scenes"), ignore_errors=True) os.makedirs(os.path.join(source_dir, "src", "scenes"), exist_ok=True) @@ -408,6 +485,7 @@ async def make_local_modifications(db: Session, redis: Redis, with open(os.path.join(app_dir, filename), "w") as f: f.write(code) + # Write out each scene as scene_{id}.nim for scene in frame.scenes: try: scene_source = write_scene_nim(frame, scene) @@ -420,12 +498,14 @@ async def make_local_modifications(db: Session, redis: Redis, f"({scene.get('id','default')}): {e}") raise + # scenes.nim aggregator with open(os.path.join(source_dir, "src", "scenes", "scenes.nim"), "w") as f: source = write_scenes_nim(frame) f.write(source) if frame.debug: await log(db, redis, int(frame.id), "stdout", f"Generated scenes.nim:\n{source}") + # drivers.nim drivers = drivers_for_frame(frame) with open(os.path.join(source_dir, "src", "drivers", "drivers.nim"), "w") as f: source = write_drivers_nim(drivers) @@ -433,6 +513,7 @@ async def make_local_modifications(db: Session, redis: Redis, if frame.debug: await log(db, redis, int(frame.id), "stdout", f"Generated drivers.nim:\n{source}") + # waveshare driver if needed if drivers.get("waveshare"): with open(os.path.join(source_dir, "src", "drivers", "waveshare", "driver.nim"), "w") as wf: source = write_waveshare_driver_nim(drivers) @@ -442,6 +523,10 @@ async def make_local_modifications(db: Session, redis: Redis, def compile_line_md5(input_str: str) -> str: + """ + Hash of compile command line, ignoring certain flags + (used in caching logic). + """ words = [] ignore_next = False for word in input_str.split(' '): @@ -464,8 +549,14 @@ async def create_local_build_archive( source_dir: str, temp_dir: str, cpu: str -): +) -> str: + """ + Run Nim to generate the C files (and Makefile scaffolding), + then create a tar.gz of the build directory. + Returns path to the .tar.gz. 
+ """ drivers = drivers_for_frame(frame) + # Copy vendor code if needed if inkyPython := drivers.get('inkyPython'): vendor_folder = inkyPython.vendor_folder or "" os.makedirs(os.path.join(build_dir, "vendor"), exist_ok=True) @@ -500,6 +591,7 @@ async def create_local_build_archive( status, out, err = await exec_local_command(db, redis, frame, cmd) if status != 0: + # Attempt to parse the last Nim error line for context lines = (out or "").split("\n") filtered = [ln for ln in lines if ln.strip()] if filtered: @@ -517,22 +609,23 @@ async def create_local_build_archive( all_lines = of.readlines() await log(db, redis, int(frame.id), "stdout", f"Error in {rel_fn}:{line_nr}:{column}") - await log(db, redis, int(frame.id), "stdout", - f"Line {line_nr}: {all_lines[line_nr - 1]}") - await log(db, redis, int(frame.id), "stdout", - f".......{'.'*(column - 1 + len(str(line_nr)))}^") + if 0 < line_nr <= len(all_lines): + line_text = all_lines[line_nr - 1] + await log(db, redis, int(frame.id), "stdout", f"Line {line_nr}: {line_text}") + caret_prefix = "......." + ('.' * (column - 1 + len(str(line_nr)))) + await log(db, redis, int(frame.id), "stdout", f"{caret_prefix}^") else: await log(db, redis, int(frame.id), "stdout", f"Error in {fn}:{line_nr}:{column}") - raise Exception("Failed to generate frameos sources") + # Copy nimbase.h into build_dir nimbase_path = find_nimbase_file(nim_path) if not nimbase_path: raise Exception("nimbase.h not found") - shutil.copy(nimbase_path, os.path.join(build_dir, "nimbase.h")) + # If waveshare, copy the variant C/h files if waveshare := drivers.get('waveshare'): if waveshare.variant: variant_folder = get_variant_folder(waveshare.variant) @@ -543,7 +636,8 @@ async def create_local_build_archive( os.path.join(build_dir, uf) ) - # color e-paper variants + # color e-paper variants need bc-based filenames + # e.g. EPD_2in9b -> EPD_2in9bc.(c/h) if waveshare.variant in [ "EPD_2in9b", "EPD_2in9c", "EPD_2in13b", "EPD_2in13c", "EPD_4in2b", "EPD_4in2c", "EPD_5in83b", "EPD_5in83c", @@ -552,7 +646,11 @@ async def create_local_build_archive( c_file = re.sub(r'[bc]', 'bc', waveshare.variant) variant_files = [f"{waveshare.variant}.nim", f"{c_file}.c", f"{c_file}.h"] else: - variant_files = [f"{waveshare.variant}.nim", f"{waveshare.variant}.c", f"{waveshare.variant}.h"] + variant_files = [ + f"{waveshare.variant}.nim", + f"{waveshare.variant}.c", + f"{waveshare.variant}.h" + ] for vf in variant_files: shutil.copy( @@ -560,34 +658,43 @@ async def create_local_build_archive( os.path.join(build_dir, vf) ) + # Generate a Makefile from the Nim-generated compile_frameos.sh + nimc.Makefile template with open(os.path.join(build_dir, "Makefile"), "w") as mk: script_path = os.path.join(build_dir, "compile_frameos.sh") linker_flags = ["-pthread", "-lm", "-lrt", "-ldl"] compiler_flags: list[str] = [] - with open(script_path, "r") as sc: - lines_sc = sc.readlines() - for line in lines_sc: - if " -o frameos " in line and " -l" in line: - linker_flags = [ - fl.strip() for fl in line.split(' ') - if fl.startswith('-') and fl != '-o' - ] - elif " -c " in line and not compiler_flags: - compiler_flags = [ - fl for fl in line.split(' ') - if fl.startswith('-') and not fl.startswith('-I') - and fl not in ['-o', '-c', '-D'] - ] - + if os.path.isfile(script_path): + with open(script_path, "r") as sc: + lines_sc = sc.readlines() + for line in lines_sc: + if " -o frameos " in line and " -l" in line: + # This line typically has -o frameos -lpthread -lm etc. 
+ linker_flags = [ + fl.strip() for fl in line.split(' ') + if fl.startswith('-') and fl != '-o' + ] + elif " -c " in line and not compiler_flags: + # Nim's compile command for each .c + compiler_flags = [ + fl for fl in line.split(' ') + if fl.startswith('-') and not fl.startswith('-I') + and fl not in ['-o', '-c', '-D'] + ] + + # Base Makefile template with open(os.path.join(source_dir, "tools", "nimc.Makefile"), "r") as mf_in: lines_make = mf_in.readlines() + for ln in lines_make: if ln.startswith("LIBS = "): ln = "LIBS = -L. " + " ".join(linker_flags) + "\n" if ln.startswith("CFLAGS = "): - ln = "CFLAGS = " + " ".join([f for f in compiler_flags if f != '-c']) + "\n" + # remove '-c' if present + cf = [f for f in compiler_flags if f != '-c'] + ln = "CFLAGS = " + " ".join(cf) + "\n" mk.write(ln) + # Finally, tar up the build directory (which includes .c, .h, Makefile, etc.) archive_path = os.path.join(temp_dir, f"build_{build_id}.tar.gz") zip_base = os.path.join(temp_dir, f"build_{build_id}") shutil.make_archive(zip_base, 'gztar', temp_dir, f"build_{build_id}") @@ -595,6 +702,9 @@ async def create_local_build_archive( def find_nim_executable(): + """ + Try 'nim' in PATH, else guess common install paths on each OS. + """ common_paths = { 'Windows': [ 'C:\\Program Files\\Nim\\bin\\nim.exe', @@ -610,10 +720,8 @@ def find_nim_executable(): '/opt/nim/bin/nim', ] } - if is_executable_in_path('nim'): return 'nim' - os_type = platform.system() for path in common_paths.get(os_type, []): if os.path.isfile(path) and os.access(path, os.X_OK): @@ -622,6 +730,9 @@ def find_nim_executable(): def is_executable_in_path(executable: str): + """ + Check if `executable` is callable from PATH. + """ try: subprocess.run([executable, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) return True @@ -630,26 +741,41 @@ def is_executable_in_path(executable: str): def get_nim_version(executable_path: str): + """ + Return a parsed packaging.version.Version for nim --version. + """ try: result = subprocess.run([executable_path, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True) + # Typically: Nim Compiler Version 2.2.0 [MacOSX: arm64] output = result.stdout.split('\n')[0] - version_str = output.split()[3] - return version.parse(version_str) + # e.g. "Nim Compiler Version 2.2.0" + parts = output.split() + # The version is usually parts[3], but let's be defensive + for p in parts: + if re.match(r'^\d+(\.\d+){1,2}', p): + return version.parse(p) + return None except Exception as e: print(f"Error getting Nim version: {e}") return None def find_nimbase_file(nim_executable: str): + """ + Attempt to find nimbase.h in Nim's lib directory, + scanning possible locations on each OS. 
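+    Candidates come from 'nim dump' output, well-known per-OS lib
+    directories, and (on macOS) the Homebrew Cellar.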
+ """ nimbase_paths: list[str] = [] + # Nim 2.x 'nim dump' can yield Nim paths in stderr try: nim_dump_output = subprocess.run( [nim_executable, "dump"], text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE ).stderr + # Collect lines that reference 'lib' nimbase_paths.extend(line for line in nim_dump_output.splitlines() if 'lib' in line) except subprocess.CalledProcessError as e: print(f"Error running 'nim dump': {e}") @@ -663,11 +789,7 @@ def find_nimbase_file(nim_executable: str): elif os_type == 'Windows': nimbase_paths.append('C:\\Nim\\lib') - for path in nimbase_paths: - nb_file = os.path.join(path, 'nimbase.h') - if os.path.isfile(nb_file): - return nb_file - + # Also check Homebrew Cellar for Nim if os_type == 'Darwin': base_dir = '/opt/homebrew/Cellar/nim/' if os.path.exists(base_dir): @@ -675,4 +797,11 @@ def find_nimbase_file(nim_executable: str): nb_file = os.path.join(base_dir, verdir, 'nim', 'lib', 'nimbase.h') if os.path.isfile(nb_file): return nb_file + + # See if any path leads to nimbase.h + for path in nimbase_paths: + nb_file = os.path.join(path, 'nimbase.h') + if os.path.isfile(nb_file): + return nb_file + return None diff --git a/frameos/tools/nimc.Makefile b/frameos/tools/nimc.Makefile index fe8dd481..5eee68eb 100644 --- a/frameos/tools/nimc.Makefile +++ b/frameos/tools/nimc.Makefile @@ -19,7 +19,7 @@ clean: pre-build: @mkdir -p ../cache - @echo "Compiling on device, largest files first. This might take minutes on the first run." + @echo "Compiling. This might take minutes on the first run." $(OBJECTS): pre-build From 64cb3ffb8fe9f341964565a88a3017efae40c3db Mon Sep 17 00:00:00 2001 From: Marius Andra Date: Wed, 12 Mar 2025 00:28:42 +0100 Subject: [PATCH 3/4] it looks like it works --- Dockerfile | 82 +++++++++++++++++++++++++++++++++++++----------------- 1 file changed, 56 insertions(+), 26 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8bbe2aac..bc5ab3b3 100644 --- a/Dockerfile +++ b/Dockerfile @@ -4,21 +4,28 @@ FROM python:3.11-slim-bullseye # Set the working directory WORKDIR /app -# Install Node.js based on platform -RUN apt-get update && apt-get install -y curl build-essential libffi-dev redis-server ca-certificates gnupg \ +# ------------------------------------------------------------------ +# 1. Install system packages and Node.js +# ------------------------------------------------------------------ +RUN apt-get update && apt-get install -y \ + curl build-essential libffi-dev redis-server ca-certificates gnupg \ && mkdir -p /etc/apt/keyrings \ && curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg \ && NODE_MAJOR=18 \ - && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" | tee /etc/apt/sources.list.d/nodesource.list \ + && echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_$NODE_MAJOR.x nodistro main" \ + | tee /etc/apt/sources.list.d/nodesource.list \ && apt-get update \ && apt-get install -y nodejs -# Install Nim +# ------------------------------------------------------------------ +# 2. 
Install Nim from source +# ------------------------------------------------------------------ RUN apt-get update && \ - apt-get install -y curl xz-utils gcc openssl ca-certificates git # && + apt-get install -y curl xz-utils gcc openssl ca-certificates git RUN mkdir -p /opt/nim && \ - curl -L https://nim-lang.org/download/nim-2.2.0.tar.xz | tar -xJf - -C /opt/nim --strip-components=1 && \ + curl -L https://nim-lang.org/download/nim-2.2.2.tar.xz \ + | tar -xJf - -C /opt/nim --strip-components=1 && \ cd /opt/nim && \ sh build.sh && \ bin/nim c koch && \ @@ -27,34 +34,54 @@ RUN mkdir -p /opt/nim && \ ENV PATH="/opt/nim/bin:${PATH}" -RUN nim --version \ - nimble --version +RUN nim --version && nimble --version -# Add ARM64 architecture and install cross compiler and cross libraries +# ------------------------------------------------------------------ +# 3. Add ARM64 architecture & install cross-compiler + stdlib +# ------------------------------------------------------------------ RUN dpkg --add-architecture arm64 \ && apt-get update \ && apt-get install -y \ crossbuild-essential-arm64 \ libc6-dev:arm64 \ - && rm -rf /var/lib/apt/lists/* - -# Copy the requirements file and install using pip + pkg-config \ + libevdev-dev:arm64 \ + wget + +# ------------------------------------------------------------------ +# 4. Build + install liblgpio for ARM64 from source +# (so -llgpio can be found by the cross-compiler) +# ------------------------------------------------------------------ +RUN mkdir -p /tmp/lgpio-install && \ + cd /tmp/lgpio-install && \ + wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && \ + tar -xzf v0.2.2.tar.gz && \ + cd lg-0.2.2 && \ + CC=aarch64-linux-gnu-gcc make && \ + make install && \ + # Copy compiled libs into multiarch paths + mkdir -p /usr/lib/aarch64-linux-gnu /usr/include/aarch64-linux-gnu && \ + cp /usr/local/lib/liblg*.so* /usr/lib/aarch64-linux-gnu/ && \ + cp /usr/local/include/lgpio.h /usr/include/aarch64-linux-gnu/ && \ + ldconfig && \ + cd / && rm -rf /tmp/lgpio-install + +# ------------------------------------------------------------------ +# 5. Install Python dependencies +# ------------------------------------------------------------------ WORKDIR /app/backend COPY backend/requirements.txt . RUN pip3 install --upgrade uv \ && uv venv \ && uv pip install --no-cache-dir -r requirements.txt -# Change the working directory for npm install +# ------------------------------------------------------------------ +# 6. Install and build frontend +# ------------------------------------------------------------------ WORKDIR /tmp/frontend - -# Copy the npm configuration files COPY frontend/package.json frontend/package-lock.json /tmp/frontend/ - -# Install npm packages RUN npm install -# Copy frontend source files and run build COPY frontend/ ./ COPY version.json ../ RUN npm run build @@ -62,16 +89,20 @@ RUN npm run build # Delete all files except the dist and schema folders RUN find . -maxdepth 1 ! -name 'dist' ! -name 'schema' ! -name '.' ! -name '..' -exec rm -rf {} \; -# Cleanup node installations and build tools -RUN apt-get remove -y nodejs curl build-essential libffi-dev ca-certificates gnupg \ +# ------------------------------------------------------------------ +# 7. 
Clean up unneeded Nodejs & other packages +# (Keeping crossbuild-essential-arm64 + build-essential for cross-compiling) +# ------------------------------------------------------------------ +RUN apt-get remove -y nodejs curl libffi-dev ca-certificates gnupg \ && apt-get autoremove -y \ && apt-get clean \ && rm -rf /app/frontend/node_modules \ && rm -rf /var/lib/apt/lists/* /root/.npm -# Change back to the main directory +# ------------------------------------------------------------------ +# 8. Prepare Nim environment +# ------------------------------------------------------------------ WORKDIR /app/frameos - COPY frameos/frameos.nimble ./ COPY frameos/nimble.lock ./ COPY frameos/nim.cfg ./ @@ -79,12 +110,11 @@ COPY frameos/nim.cfg ./ # Cache nimble deps for when deploying on frame RUN nimble install -d -y && nimble setup -# Change back to the main directory +# ------------------------------------------------------------------ +# 9. Move final built frontend into /app +# ------------------------------------------------------------------ WORKDIR /app - -# Copy the rest of the application to the container COPY . . - RUN rm -rf /app/frontend && mv /tmp/frontend /app/ EXPOSE 8989 From 5e5600db5fdc62e0d69b80ab7ba0d90c9ada3771 Mon Sep 17 00:00:00 2001 From: Marius Andra Date: Mon, 17 Mar 2025 00:07:22 +0100 Subject: [PATCH 4/4] bunch more steps --- Dockerfile | 59 +- backend/app/tasks/deploy_frame.py | 907 +++++++++++++++++------------- frameos/frameos.nimble | 1 + frameos/nimble.lock | 47 +- frameos/tools/nimc.Makefile | 5 +- 5 files changed, 588 insertions(+), 431 deletions(-) diff --git a/Dockerfile b/Dockerfile index bc5ab3b3..60a4f642 100644 --- a/Dockerfile +++ b/Dockerfile @@ -37,37 +37,54 @@ ENV PATH="/opt/nim/bin:${PATH}" RUN nim --version && nimble --version # ------------------------------------------------------------------ -# 3. Add ARM64 architecture & install cross-compiler + stdlib +# 3. Add BOTH arm64 + armhf architectures & install cross toolchains # ------------------------------------------------------------------ RUN dpkg --add-architecture arm64 \ + && dpkg --add-architecture armhf \ && apt-get update \ && apt-get install -y \ crossbuild-essential-arm64 \ + crossbuild-essential-armhf \ libc6-dev:arm64 \ - pkg-config \ + libc6-dev:armhf \ libevdev-dev:arm64 \ + libevdev-dev:armhf \ + pkg-config \ wget # ------------------------------------------------------------------ -# 4. Build + install liblgpio for ARM64 from source -# (so -llgpio can be found by the cross-compiler) +# 4. 
Build + install liblgpio for ARM64 (aarch64) to DESTDIR +# so we don't overwrite the default /usr/local/lib # ------------------------------------------------------------------ -RUN mkdir -p /tmp/lgpio-install && \ - cd /tmp/lgpio-install && \ +# Build + install liblgpio for ARM64 +RUN mkdir -p /tmp/lgpio-arm64 && cd /tmp/lgpio-arm64 && \ wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && \ - tar -xzf v0.2.2.tar.gz && \ - cd lg-0.2.2 && \ - CC=aarch64-linux-gnu-gcc make && \ - make install && \ - # Copy compiled libs into multiarch paths + tar -xzf v0.2.2.tar.gz && cd lg-0.2.2 && \ + make clean && make CROSS_PREFIX=aarch64-linux-gnu- && \ + make DESTDIR=/tmp/install-arm64 install && \ + # Remove any /usr/local/lib stuff (if placed) and move final libs + rm -f /usr/local/lib/liblg*.so* && \ mkdir -p /usr/lib/aarch64-linux-gnu /usr/include/aarch64-linux-gnu && \ - cp /usr/local/lib/liblg*.so* /usr/lib/aarch64-linux-gnu/ && \ - cp /usr/local/include/lgpio.h /usr/include/aarch64-linux-gnu/ && \ - ldconfig && \ - cd / && rm -rf /tmp/lgpio-install + cp /tmp/install-arm64/usr/local/lib/liblg*.so* /usr/lib/aarch64-linux-gnu/ && \ + cp /tmp/install-arm64/usr/local/include/lgpio.h /usr/include/aarch64-linux-gnu/ && \ + ldconfig && cd / && rm -rf /tmp/lgpio-arm64 /tmp/install-arm64 + +# ------------------------------------------------------------------ +# 5. Build + install liblgpio for ARMHF (arm-linux-gnueabihf) to DESTDIR +# ------------------------------------------------------------------ + RUN mkdir -p /tmp/lgpio-armhf && cd /tmp/lgpio-armhf && \ + wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && \ + tar -xzf v0.2.2.tar.gz && cd lg-0.2.2 && \ + make clean && make CROSS_PREFIX=arm-linux-gnueabihf- && \ + make DESTDIR=/tmp/install-armhf install && \ + rm -f /usr/local/lib/liblg*.so* && \ + mkdir -p /usr/lib/arm-linux-gnueabihf /usr/include/arm-linux-gnueabihf && \ + cp /tmp/install-armhf/usr/local/lib/liblg*.so* /usr/lib/arm-linux-gnueabihf/ && \ + cp /tmp/install-armhf/usr/local/include/lgpio.h /usr/include/arm-linux-gnueabihf/ && \ + ldconfig && cd / && rm -rf /tmp/lgpio-armhf /tmp/install-armhf # ------------------------------------------------------------------ -# 5. Install Python dependencies +# 6. Install Python dependencies # ------------------------------------------------------------------ WORKDIR /app/backend COPY backend/requirements.txt . @@ -76,7 +93,7 @@ RUN pip3 install --upgrade uv \ && uv pip install --no-cache-dir -r requirements.txt # ------------------------------------------------------------------ -# 6. Install and build frontend +# 7. Install and build frontend # ------------------------------------------------------------------ WORKDIR /tmp/frontend COPY frontend/package.json frontend/package-lock.json /tmp/frontend/ @@ -90,8 +107,8 @@ RUN npm run build RUN find . -maxdepth 1 ! -name 'dist' ! -name 'schema' ! -name '.' ! -name '..' -exec rm -rf {} \; # ------------------------------------------------------------------ -# 7. Clean up unneeded Nodejs & other packages -# (Keeping crossbuild-essential-arm64 + build-essential for cross-compiling) +# 8. 
Clean up unneeded Nodejs & other packages +# (Keeping crossbuild-essential-* and build-essential) # ------------------------------------------------------------------ RUN apt-get remove -y nodejs curl libffi-dev ca-certificates gnupg \ && apt-get autoremove -y \ @@ -100,7 +117,7 @@ RUN apt-get remove -y nodejs curl libffi-dev ca-certificates gnupg \ && rm -rf /var/lib/apt/lists/* /root/.npm # ------------------------------------------------------------------ -# 8. Prepare Nim environment +# 9. Prepare Nim environment # ------------------------------------------------------------------ WORKDIR /app/frameos COPY frameos/frameos.nimble ./ @@ -111,7 +128,7 @@ COPY frameos/nim.cfg ./ RUN nimble install -d -y && nimble setup # ------------------------------------------------------------------ -# 9. Move final built frontend into /app +# 10. Move final built frontend into /app # ------------------------------------------------------------------ WORKDIR /app COPY . . diff --git a/backend/app/tasks/deploy_frame.py b/backend/app/tasks/deploy_frame.py index 042b86f3..5b4abd92 100644 --- a/backend/app/tasks/deploy_frame.py +++ b/backend/app/tasks/deploy_frame.py @@ -1,6 +1,5 @@ from datetime import datetime, timezone import json -import hashlib import os import random import re @@ -38,6 +37,12 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): """ Main deployment logic for building, packaging, and deploying the Nim (FrameOS) application onto a target device via SSH. + Changes made: + 1) If cross-compiling, only the final `frameos` binary (and vendor if needed) + is uploaded, not the full C source code. + 2) Download minimal `libevdev.so.*` and `liblgpio.so.*` plus relevant headers + from the Pi to local sysroot so we can link the same version that the Pi has. + 3) If apt fails for `liblgpio-dev`, compile from source on the Pi. """ db: Session = ctx['db'] redis: Redis = ctx['redis'] @@ -68,185 +73,247 @@ async def deploy_frame_task(ctx: dict[str, Any], id: int): nim_path = find_nim_v2() ssh = await get_ssh_connection(db, redis, frame) - async def install_if_necessary(pkg: str, raise_on_error=True) -> int: - """ - Installs package `pkg` on the remote device if it's not already installed. - """ - cmd = f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}" - return await exec_command(db, redis, frame, ssh, cmd, raise_on_error=raise_on_error) - - # 1. Detect target architecture on the remote device. + # 1. Determine the remote CPU architecture await log(db, redis, id, "stdout", "- Getting target architecture") uname_output: list[str] = [] await exec_command(db, redis, frame, ssh, "uname -m", uname_output) arch = "".join(uname_output).strip() - - # Simplify arch -> "arm64", "arm", "amd64", "i386" ... cpu = get_target_cpu(arch) - # Check total memory (for Pi Zero, etc.) so we can stop the service during local (on-device) compile + # For ARM Pi: pass extra march flags for ARMv6 or ARMv7 + pass_c_l_flags = "" + if arch == "armv6l": + pass_c_l_flags = "-march=armv6 -mfpu=vfp -mfloat-abi=hard -mtune=arm1176jzf-s -marm" + elif arch == "armv7l": + pass_c_l_flags = "-march=armv7-a -mfloat-abi=hard -mfpu=vfpv3 -mtune=cortex-a7 -marm" + + # 2. We will install needed dependencies on the Pi: + # build-essential is only needed if we end up *not* cross-compiling. + # But let's ensure the Pi can also run code that uses evdev, lgpio, etc. + # We'll also handle the possibility that `liblgpio-dev` is missing in apt. 
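+    #    (apt first in step 2B below, then the lg-0.2.2 source tarball).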
+    await log(db, redis, id, "stdout", "- Installing required packages on the Pi (if available)")
+    # Install the base packages via the install_if_necessary helper:
+    pkgs = ["ntp", "libevdev-dev"]
+    # We do NOT add "build-essential" here by default. We'll do it conditionally if we need on-device build.
+    for pkg in pkgs:
+        await install_if_necessary(db, redis, frame, ssh, pkg, raise_on_error=False)
+
+    # 2B. Try installing `liblgpio-dev`; if apt can't find it, compile from source
+    rc = await install_if_necessary(db, redis, frame, ssh, "liblgpio-dev", raise_on_error=False)
+    if rc != 0:
+        # We'll do the same approach we used for waveshare:
+        await log(db, redis, id, "stdout", "--> Could not find liblgpio-dev. Installing from source.")
+        command = (
+            "if [ ! -f /usr/local/include/lgpio.h ]; then "
+            "  rm -rf /tmp/lgpio-install && "
+            "  mkdir -p /tmp/lgpio-install && "
+            "  cd /tmp/lgpio-install && "
+            "  wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && "
+            "  tar -xzf v0.2.2.tar.gz && "
+            "  cd lg-0.2.2 && "
+            "  make && "
+            "  sudo make install && "
+            "  sudo rm -rf /tmp/lgpio-install; "
+            "fi"
+        )
+        await exec_command(db, redis, frame, ssh, command)
+
+    # 2C. Scenes might require apt packages
+    all_deps = get_apt_dependencies_from_scenes(db, redis, frame)
+    for dep in all_deps:
+        await install_if_necessary(db, redis, frame, ssh, dep)
+
+    # 3. Check if we can cross-compile. Otherwise we’ll compile on the device.
+    cross_compiler = get_cross_compiler_for_cpu(cpu)
+    do_cross_compile = False
+    if cross_compiler:
+        rc, _, _ = await exec_local_command(db, redis, frame, f"{cross_compiler} --version", generate_log=False)
+        if rc == 0:
+            do_cross_compile = True
+
+    # 4. If do_cross_compile, fetch minimal libs+headers from Pi for local linking
+    #    (we only need libevdev & liblgpio plus their includes).
+    local_sysroot_dir = None
+    if do_cross_compile:
+        await log(db, redis, id, "stdout", f"- Found cross-compiler '{cross_compiler}' for {cpu}")
+        # TODO: delete this later? preserve it?
+        local_sysroot_dir = os.path.join(tempfile.gettempdir(), f"sysroot_{frame.id}_{build_id}")
+        # local_sysroot_dir = os.path.abspath(f"./sysroot_{frame.id}_{build_id}")
+        if not os.path.exists(local_sysroot_dir):
+            os.makedirs(local_sysroot_dir, exist_ok=True)
+
+        # 4A. Download the relevant .so libs from the Pi
+        #     We'll store them in e.g. sysroot/usr/lib/arm-linux-gnueabihf
+        remote_libs_tar = f"/tmp/libs_{build_id}.tar.gz"
+        cmd = (
+            f"sudo tar -czf {remote_libs_tar} "
+            f"/usr/lib/arm-linux-gnueabihf/libarmmem* "
+            f"/usr/lib/arm-linux-gnueabihf/libm.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libdl.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libpthread.so* "
+            f"/usr/lib/arm-linux-gnueabihf/libc.so* "
+            f"/usr/lib/arm-linux-gnueabihf/liblgpio.so* "
+            "2>/dev/null || true" # just in case some file is missing
+        )
+        await exec_command(db, redis, frame, ssh, cmd)
+        local_libs_tar = os.path.join(local_sysroot_dir, "libs.tar.gz")
+        await asyncssh.scp((ssh, remote_libs_tar), local_libs_tar)
+        # Clean up remote tar
+        await exec_command(db, redis, frame, ssh, f"sudo rm -f {remote_libs_tar}")
+
+        # Extract to sysroot/usr/lib/arm-linux-gnueabihf
+        sysroot_lib_dir = os.path.join(local_sysroot_dir, "usr", "lib", "arm-linux-gnueabihf")
+        os.makedirs(sysroot_lib_dir, exist_ok=True)
+        shutil.unpack_archive(local_libs_tar, local_sysroot_dir)
+        os.remove(local_libs_tar)
+
+        # 4B. 
Download relevant includes: often /usr/include/libevdev-1.0 & the lgpio.h + remote_inc_tar = f"/tmp/includes_{build_id}.tar.gz" + cmd = ( + f"sudo tar -czf {remote_inc_tar} " + f"/usr/include/libevdev-1.0 " + f"/usr/include/arm-linux-gnueabihf/lgpio.h " + f"/usr/local/include/lgpio.h " + "2>/dev/null || true" + ) + await exec_command(db, redis, frame, ssh, cmd) + local_inc_tar = os.path.join(local_sysroot_dir, "includes.tar.gz") + await asyncssh.scp((ssh, remote_inc_tar), local_inc_tar) + await exec_command(db, redis, frame, ssh, f"sudo rm -f {remote_inc_tar}") + # Extract them into local sysroot + shutil.unpack_archive(local_inc_tar, local_sysroot_dir) + os.remove(local_inc_tar) + + # 5. Possibly handle low memory Pi if we are building on-device total_memory = 0 try: mem_output: list[str] = [] await exec_command(db, redis, frame, ssh, "free -m", mem_output) - # mem_output[0]: " total used free shared buff/cache available" - # mem_output[1]: "Mem: 991 223 ... etc." - # We'll parse line 1 total_memory = int(mem_output[1].split()[1]) except Exception as e: await log(db, redis, id, "stderr", str(e)) - low_memory = total_memory < 512 + low_memory = (total_memory < 512) - drivers = drivers_for_frame(frame) + if not do_cross_compile: + # We may need to compile on the Pi + await install_if_necessary(db, redis, frame, ssh, "build-essential") + if low_memory: + await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation") + await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False) - # 2. Build or cross-compile locally, then package up the result + # 6. Generate Nim -> C code locally and optionally cross-compile + drivers = drivers_for_frame(frame) with tempfile.TemporaryDirectory() as temp_dir: - await log(db, redis, id, "stdout", "- Copying build folders") - build_dir, source_dir = create_build_folders(temp_dir, build_id) + await log(db, redis, id, "stdout", "- Creating local Nim build (C sources)") - await log(db, redis, id, "stdout", "- Applying local modifications") + build_dir, source_dir = create_build_folders(temp_dir, build_id) await make_local_modifications(db, redis, frame, source_dir) - await log(db, redis, id, "stdout", "- Creating build archive") - - # Decide if we can cross-compile for the target - do_cross_compile = False - cross_compiler = get_cross_compiler_for_cpu(cpu) - if cross_compiler: - response_code, _, _ = await exec_local_command(db, redis, frame, f"{cross_compiler} --version", generate_log=False) - if response_code == 0: - do_cross_compile = True - - # 2A. Generate Nim -> C code. This always happens locally (we need the C code). - archive_path = await create_local_build_archive( + # Just produce C code + Makefile + c_archive_path = await create_local_build_archive( db, redis, frame, - build_dir, build_id, nim_path, source_dir, temp_dir, cpu + build_dir, build_id, nim_path, source_dir, temp_dir, cpu, + pass_c_l_flags, + do_cross_compile ) - # 2B. If cross-compiling is possible, actually produce final `frameos` locally. - if do_cross_compile: - await log(db, redis, id, "stdout", f"- Cross compiling for {cpu} using {cross_compiler}") - cmd = f"cd {build_dir} && make clean && make CC={cross_compiler} -j$(nproc)" - status, out, err = await exec_local_command(db, redis, frame, cmd) - if status != 0: - raise Exception("Cross-compilation failed. See logs for details.") - - # Re-create the tar AFTER the final binary is present - # We remove the old one, then tar again so it has `frameos` in it. 
- os.remove(archive_path) - archive_path = os.path.join(temp_dir, f"build_{build_id}.tar.gz") - zip_base = os.path.join(temp_dir, f"build_{build_id}") - shutil.make_archive(zip_base, 'gztar', temp_dir, f"build_{build_id}") - - # 3. On low-memory devices, stop FrameOS if we must compile there - # i.e. we do NOT have cross_compiled. - if not do_cross_compile and low_memory: - await log(db, redis, id, "stdout", "- Low memory device, stopping FrameOS for compilation") - await exec_command(db, redis, frame, ssh, "sudo service frameos stop", raise_on_error=False) - - # 4. Install build dependencies on device (only if we plan to compile on device) - if not do_cross_compile: - await install_if_necessary("build-essential") - - await install_if_necessary("ntp") # Keep time in sync + frameos_binary_path = os.path.join(build_dir, "frameos") - if drivers.get("evdev"): - await install_if_necessary("libevdev-dev") + if do_cross_compile and local_sysroot_dir: + # 6A. Actually compile locally with cross_compiler + await log(db, redis, id, "stdout", "- Cross compiling `frameos` with the Pi's libraries + headers") - # 4A. Install liblgpio if needed (for waveshare or gpioButton). - if drivers.get("waveshare") or drivers.get("gpioButton"): - check_lgpio = await exec_command( - db, redis, frame, ssh, - '[[ -f "/usr/local/include/lgpio.h" || -f "/usr/include/lgpio.h" ]] && exit 0 || exit 1', - raise_on_error=False + # Provide CFLAGS with path to local sysroot + sysroot_flags = ( + f"--sysroot={local_sysroot_dir} " + f"-I{local_sysroot_dir}/usr/include " + f"-L{local_sysroot_dir}/usr/lib/arm-linux-gnueabihf " + ) + # We also apply our pass_c_l_flags (-march=...) + # plus the libraries Nim might link: -levdev -llgpio + make_cmd = ( + f"cd {build_dir} && make clean && " + f"make -j$(nproc) CC={cross_compiler} " + f"\"SYSROOT={sysroot_flags}\" " + ) + status, _, _ = await exec_local_command(db, redis, frame, make_cmd) + if status != 0: + raise Exception("Cross-compilation with sysroot failed.") + else: + # 6B. On-device compile approach + await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/ /srv/frameos/logs/") + await log(db, redis, id, "stdout", f"> add /srv/frameos/build/build_{build_id}.tar.gz") + + # Upload the entire C code tar to compile on Pi + await asyncssh.scp( + c_archive_path, + (ssh, f"/srv/frameos/build/build_{build_id}.tar.gz"), + recurse=False ) - if check_lgpio != 0: - # Try installing liblgpio-dev - if await install_if_necessary("liblgpio-dev", raise_on_error=False) != 0: - await log(db, redis, id, "stdout", "--> Could not find liblgpio-dev. Installing from source.") - command = ( - "if [ ! -f /usr/local/include/lgpio.h ]; then " - " rm -rf /tmp/lgpio-install && " - " mkdir -p /tmp/lgpio-install && " - " cd /tmp/lgpio-install && " - " wget -q -O v0.2.2.tar.gz https://github.com/joan2937/lg/archive/refs/tags/v0.2.2.tar.gz && " - " tar -xzf v0.2.2.tar.gz && " - " cd lg-0.2.2 && " - " make && " - " sudo make install && " - " sudo rm -rf /tmp/lgpio-install; " - "fi" - ) - await exec_command(db, redis, frame, ssh, command) - - # 5. 
Check for app dependencies (APT packages declared in config.json) - all_deps = set() - for scene in frame.scenes: - try: - for node in scene.get('nodes', []): - try: - config: Optional[dict[str, str]] = None - if node.get('type') == 'app': - app = node.get('data', {}).get('keyword') - if app: - json_config = get_one_app_sources(app).get('config.json') - if json_config: - config = json.loads(json_config) - elif node.get('type') == 'source': - json_config = node.get('sources', {}).get('config.json') - if json_config: - config = json.loads(json_config) - if config and config.get('apt'): - for dep in config['apt']: - all_deps.add(dep) - except Exception as e: - await log(db, redis, id, "stderr", f"Error parsing node: {e}") - except Exception as e: - await log(db, redis, id, "stderr", f"Error parsing scene: {e}") - - for dep in all_deps: - await install_if_necessary(dep) - - # Ensure /srv/frameos on device - await exec_command(db, redis, frame, ssh, - "if [ ! -d /srv/frameos/ ]; then " - " sudo mkdir -p /srv/frameos/ && sudo chown $(whoami):$(whoami) /srv/frameos/; " - "fi") - - await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/ /srv/frameos/logs/") - await log(db, redis, id, "stdout", f"> add /srv/frameos/build/build_{build_id}.tar.gz") - - # 6. Upload the local tarball to the device - await asyncssh.scp( - archive_path, - (ssh, f"/srv/frameos/build/build_{build_id}.tar.gz"), - recurse=False - ) - - # 7. If we haven't cross-compiled locally, compile on the device - await exec_command( - db, redis, frame, ssh, - f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz" - ) - - if not do_cross_compile: - # device-based compile await exec_command( db, redis, frame, ssh, + f"cd /srv/frameos/build && tar -xzf build_{build_id}.tar.gz && rm build_{build_id}.tar.gz" + ) + compile_cmd = ( f"cd /srv/frameos/build/build_{build_id} && " "PARALLEL_MEM=$(awk '/MemTotal/{printf \"%.0f\\n\", $2/1024/250}' /proc/meminfo) && " "PARALLEL=$(($PARALLEL_MEM < $(nproc) ? $PARALLEL_MEM : $(nproc))) && " "make -j$PARALLEL" ) + await exec_command(db, redis, frame, ssh, compile_cmd) - # 8. Move final binary into a release folder - await exec_command(db, redis, frame, ssh, f"mkdir -p /srv/frameos/releases/release_{build_id}") - await exec_command( - db, redis, frame, ssh, - f"cp /srv/frameos/build/build_{build_id}/frameos /srv/frameos/releases/release_{build_id}/frameos" - ) + # 7. Upload final `frameos` executable (if cross-compiled), plus vendor if needed + release_path = f"/srv/frameos/releases/release_{build_id}" + if do_cross_compile: + # We skip uploading the entire build_{build_id} folder. Just upload the `frameos`. + await exec_command(db, redis, frame, ssh, + f"mkdir -p {release_path}") + # TODO: compress + await asyncssh.scp( + frameos_binary_path, + (ssh, f"{release_path}/frameos"), + recurse=False + ) + # If there's vendor code (e.g. inky) we still need to copy that to the Pi, + # because e.g. the Python environment is needed at runtime. 
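+            # The vendor folders are tarred locally, scp'd to a temp dir on
+            # the Pi, and unpacked into /srv/frameos/vendor below.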
+ vendor_tar = None + if requires_vendor_upload(drivers): + vendor_tar = os.path.join(temp_dir, f"vendor_{build_id}.tar.gz") + vendor_folder_temp = os.path.join(temp_dir, "vendor") + os.makedirs(vendor_folder_temp, exist_ok=True) + copy_vendor_folders(drivers, vendor_folder_temp) + shutil.make_archive( + base_name=os.path.join(temp_dir, f"vendor_{build_id}"), + format='gztar', + root_dir=temp_dir, + base_dir="vendor" + ) + await exec_command(db, redis, frame, ssh, "mkdir -p /srv/frameos/build/vendor_temp") + await asyncssh.scp(vendor_tar, + (ssh, f"/srv/frameos/build/vendor_temp/vendor_{build_id}.tar.gz"), + recurse=False) + await exec_command( + db, redis, frame, ssh, + f"cd /srv/frameos/build/vendor_temp && " + f"tar -xzf vendor_{build_id}.tar.gz && rm vendor_{build_id}.tar.gz" + ) + # Then we can move that vendor code to the new release + await exec_command( + db, redis, frame, ssh, + "mkdir -p /srv/frameos/vendor && " + "cp -r /srv/frameos/build/vendor_temp/vendor/* /srv/frameos/vendor/" + ) + await exec_command(db, redis, frame, ssh, "rm -rf /srv/frameos/build/vendor_temp") + + else: + # We compiled on the Pi. The final binary is at /srv/frameos/build/build_{build_id}/frameos + await exec_command(db, redis, frame, ssh, f"mkdir -p {release_path}") + await exec_command( + db, redis, frame, ssh, + f"cp /srv/frameos/build/build_{build_id}/frameos {release_path}/frameos" + ) - # 9. Upload frame.json to the new release + # 8. Upload frame.json frame_json_data = (json.dumps(get_frame_json(db, frame), indent=4) + "\n").encode('utf-8') with tempfile.NamedTemporaryFile(suffix=".json", delete=False) as tmpf: local_json_path = tmpf.name @@ -254,147 +321,68 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int: await asyncssh.scp( local_json_path, - (ssh, f"/srv/frameos/releases/release_{build_id}/frame.json"), + (ssh, f"{release_path}/frame.json"), recurse=False ) os.remove(local_json_path) - await log(db, redis, id, "stdout", f"> add /srv/frameos/releases/release_{build_id}/frame.json") + await log(db, redis, id, "stdout", f"> add {release_path}/frame.json") - # 10. Vendor steps for certain drivers - if inkyPython := drivers.get("inkyPython"): - await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/vendor && " - f"cp -r /srv/frameos/build/build_{build_id}/vendor/inkyPython /srv/frameos/vendor/") - await install_if_necessary("python3-pip") - await install_if_necessary("python3-venv") - await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/vendor/{inkyPython.vendor_folder} && " - "([ ! -d env ] && python3 -m venv env || echo 'env exists') && " - "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " - "(echo '> env/bin/pip3 install -r requirements.txt' && " - "env/bin/pip3 install -r requirements.txt && " - "sha256sum requirements.txt > requirements.txt.sha256sum))") - - if inkyHyperPixel2r := drivers.get("inkyHyperPixel2r"): + # 9. 
If inky vendor, set up Python venv on the Pi + await install_inky_vendors(db, redis, frame, ssh, build_id, drivers) + + # Clean old builds if we did on-device compile + if not do_cross_compile: await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/vendor && " - f"cp -r /srv/frameos/build/build_{build_id}/vendor/inkyHyperPixel2r /srv/frameos/vendor/") - await install_if_necessary("python3-dev") - await install_if_necessary("python3-pip") - await install_if_necessary("python3-venv") + "cd /srv/frameos/build && ls -dt1 build_* | tail -n +11 | xargs rm -rf") await exec_command(db, redis, frame, ssh, - f"cd /srv/frameos/vendor/{inkyHyperPixel2r.vendor_folder} && " - "([ ! -d env ] && python3 -m venv env || echo 'env exists') && " - "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || " - "(echo '> env/bin/pip3 install -r requirements.txt' && " - "env/bin/pip3 install -r requirements.txt && " - "sha256sum requirements.txt > requirements.txt.sha256sum))") - - # 11. Upload and enable frameos.service - with open("../frameos/frameos.service", "r") as f: - service_contents = f.read().replace("%I", frame.ssh_user) - service_data = service_contents.encode('utf-8') - with tempfile.NamedTemporaryFile(suffix=".service", delete=False) as tmpservice: - local_service_path = tmpservice.name - tmpservice.write(service_data) - await asyncssh.scp( - local_service_path, - (ssh, f"/srv/frameos/releases/release_{build_id}/frameos.service"), - recurse=False - ) - os.remove(local_service_path) + "cd /srv/frameos/build/cache && " + "find . -type f \\( -atime +0 -a -mtime +0 \\) | xargs rm -rf") - await exec_command(db, redis, frame, ssh, - f"mkdir -p /srv/frameos/state && ln -s /srv/frameos/state " - f"/srv/frameos/releases/release_{build_id}/state") - await exec_command(db, redis, frame, ssh, - f"sudo cp /srv/frameos/releases/release_{build_id}/frameos.service " - f"/etc/systemd/system/frameos.service") - await exec_command(db, redis, frame, ssh, "sudo chown root:root /etc/systemd/system/frameos.service") - await exec_command(db, redis, frame, ssh, "sudo chmod 644 /etc/systemd/system/frameos.service") - - # 12. Link new release to /srv/frameos/current - await exec_command(db, redis, frame, ssh, - f"rm -rf /srv/frameos/current && " - f"ln -s /srv/frameos/releases/release_{build_id} /srv/frameos/current") - - # 13. Sync assets (upload new or changed files in /assets) - await sync_assets(db, redis, frame, ssh) - - # Clean old builds - await exec_command(db, redis, frame, ssh, - "cd /srv/frameos/build && ls -dt1 build_* | tail -n +11 | xargs rm -rf") - await exec_command(db, redis, frame, ssh, - "cd /srv/frameos/build/cache && find . -type f \\( -atime +0 -a -mtime +0 \\) | xargs rm -rf") + # We also remove old releases, except the current symlink await exec_command(db, redis, frame, ssh, "cd /srv/frameos/releases && " "ls -dt1 release_* | grep -v \"$(basename $(readlink ../current))\" " "| tail -n +11 | xargs rm -rf") - # 14. Additional device config if needed - boot_config = "/boot/config.txt" - if await exec_command(db, redis, frame, ssh, "test -f /boot/firmware/config.txt", raise_on_error=False) == 0: - boot_config = "/boot/firmware/config.txt" + # 10. 
systemd service, link new release + with open("../frameos/frameos.service", "r") as f: + service_contents = f.read().replace("%I", frame.ssh_user) + service_data = service_contents.encode('utf-8') + with tempfile.NamedTemporaryFile(suffix=".service", delete=False) as tmpservice: + local_service_path = tmpservice.name + tmpservice.write(service_data) + await asyncssh.scp( + local_service_path, + (ssh, f"{release_path}/frameos.service"), + recurse=False + ) + os.remove(local_service_path) - if drivers.get("i2c"): - # Ensure i2c is enabled - await exec_command(db, redis, frame, ssh, - f'grep -q "^dtparam=i2c_vc=on$" {boot_config} ' - f'|| echo "dtparam=i2c_vc=on" | sudo tee -a {boot_config}') - await exec_command(db, redis, frame, ssh, - 'command -v raspi-config > /dev/null && ' - 'sudo raspi-config nonint get_i2c | grep -q "1" && { ' - ' sudo raspi-config nonint do_i2c 0; echo "I2C enabled"; ' - '} || echo "I2C already enabled"') - - if drivers.get("spi"): - await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 0') - elif drivers.get("noSpi"): - await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 1') - - # On low memory devices, disable some apt timers - if low_memory: - await exec_command( - db, redis, frame, ssh, - "sudo systemctl mask apt-daily-upgrade && " - "sudo systemctl mask apt-daily && " - "sudo systemctl disable apt-daily.service apt-daily.timer apt-daily-upgrade.timer apt-daily-upgrade.service" - ) + await exec_command(db, redis, frame, ssh, + f"mkdir -p /srv/frameos/state && ln -s /srv/frameos/state {release_path}/state") + await exec_command(db, redis, frame, ssh, + f"sudo cp {release_path}/frameos.service /etc/systemd/system/frameos.service") + await exec_command(db, redis, frame, ssh, "sudo chown root:root /etc/systemd/system/frameos.service") + await exec_command(db, redis, frame, ssh, "sudo chmod 644 /etc/systemd/system/frameos.service") - # Reboot or auto-restart logic - if frame.reboot and frame.reboot.get('enabled') == 'true': - cron_schedule = frame.reboot.get('crontab', '0 0 * * *') - if frame.reboot.get('type') == 'raspberry': - crontab = f"{cron_schedule} root /sbin/shutdown -r now" - else: - crontab = f"{cron_schedule} root systemctl restart frameos.service" - await exec_command(db, redis, frame, ssh, f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot") - else: - await exec_command(db, redis, frame, ssh, "sudo rm -f /etc/cron.d/frameos-reboot") + await exec_command(db, redis, frame, ssh, + f"rm -rf /srv/frameos/current && ln -s {release_path} /srv/frameos/current") - # Possibly append lines to the Pi boot config and require a reboot - must_reboot = False - if drivers.get("bootconfig"): - for line in drivers["bootconfig"].lines: - cmd = f'grep -q "^{line}" {boot_config}' - if await exec_command(db, redis, frame, ssh, cmd, raise_on_error=False) != 0: - # not found in boot_config, so append - await exec_command( - db, redis, frame, ssh, - f'echo "{line}" | sudo tee -a {boot_config}', - log_output=False - ) - must_reboot = True + # 11. Sync assets + await sync_assets(db, redis, frame, ssh) - # Enable & start the service - await exec_command(db, redis, frame, ssh, "sudo systemctl daemon-reload") - await exec_command(db, redis, frame, ssh, "sudo systemctl enable frameos.service") + # 12. Additional config (SPI, I2C, apt timers, etc.) 
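+        # handle_additional_device_config (defined below) enables I2C/SPI when
+        # the drivers request it, masks the apt-daily timers on low-memory
+        # devices, and appends any missing bootconfig lines, recording whether
+        # a reboot is needed.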
+        await handle_additional_device_config(db, redis, frame, ssh, arch, drivers)
 
         frame.status = 'starting'
         frame.last_successful_deploy = frame_dict
         frame.last_successful_deploy_at = datetime.now(timezone.utc)
 
-        # Reboot if boot config changed
+        # Possibly reboot if bootconfig lines changed
+        must_reboot = drivers.get("bootconfig") and drivers["bootconfig"].needs_reboot
+        await exec_command(db, redis, frame, ssh, "sudo systemctl daemon-reload")
+        await exec_command(db, redis, frame, ssh, "sudo systemctl enable frameos.service")
+
         if must_reboot:
             await update_frame(db, redis, frame)
             await log(db, redis, int(frame.id), "stdinfo",
                       "Deployed! Rebooting device after boot config changes")
@@ -414,6 +402,50 @@ async def install_if_necessary(pkg: str, raise_on_error=True) -> int:
         await remove_ssh_connection(db, redis, ssh, frame)
 
 
+# ---------------------------------------------------------------------
+# Helpers
+# ---------------------------------------------------------------------
+
+async def install_if_necessary(db: Session, redis: Redis, frame: Frame, ssh, pkg: str, raise_on_error=True) -> int:
+    """
+    Install package `pkg` on the remote device if it's not already installed.
+    Returns the exit code from `exec_command`.
+    """
+    cmd = f"dpkg -l | grep -q \"^ii {pkg}\" || sudo apt-get install -y {pkg}"
+    return await exec_command(db, redis, frame, ssh, cmd, raise_on_error=raise_on_error)
+
+
+def get_apt_dependencies_from_scenes(db: Session, redis: Redis, frame: Frame) -> set[str]:
+    """
+    Examine each scene's config for 'apt' dependencies in config.json
+    and collect them all in a set.
+    """
+    all_deps = set()
+    for scene in frame.scenes:
+        try:
+            for node in scene.get('nodes', []):
+                try:
+                    config: Optional[dict[str, Any]] = None
+                    if node.get('type') == 'app':
+                        app = node.get('data', {}).get('keyword')
+                        if app:
+                            json_config = get_one_app_sources(app).get('config.json')
+                            if json_config:
+                                config = json.loads(json_config)
+                    elif node.get('type') == 'source':
+                        json_config = node.get('sources', {}).get('config.json')
+                        if json_config:
+                            config = json.loads(json_config)
+                    if config and config.get('apt'):
+                        for dep in config['apt']:
+                            all_deps.add(dep)
+                except Exception:
+                    # Skip nodes with malformed config.json; a real problem
+                    # will surface later when the scene fails to compile.
+                    pass
+        except Exception:
+            # Skip scenes we cannot parse for dependency collection.
+            pass
+    return all_deps
+
+
 def get_target_cpu(arch: str) -> str:
     """
     Map 'uname -m' output to something Nim expects in --cpu
@@ -433,6 +465,8 @@ def get_cross_compiler_for_cpu(cpu: str) -> Optional[str]:
     """
     Return the cross-compiler command for a given CPU, or None
     if there's no well-known cross-compiler for that CPU.
+    For Pi Zero/1 (ARMv6) or Pi 2/3 (ARMv7) we guess 'arm-linux-gnueabihf-gcc'.
+    For 64-bit Pi: 'aarch64-linux-gnu-gcc'.
""" if cpu == "arm64": return "aarch64-linux-gnu-gcc" @@ -449,12 +483,62 @@ def find_nim_v2(): nim_path = find_nim_executable() if not nim_path: raise Exception("Nim executable not found") - nim_version = get_nim_version(nim_path) - if not nim_version or nim_version < version.parse("2.0.0"): + nim_ver = get_nim_version(nim_path) + if not nim_ver or nim_ver < version.parse("2.0.0"): raise Exception("Nim 2.0.0 or higher is required") return nim_path +def find_nim_executable(): + common_paths = { + 'Windows': [ + 'C:\\Program Files\\Nim\\bin\\nim.exe', + 'C:\\Nim\\bin\\nim.exe' + ], + 'Darwin': [ + '/opt/homebrew/bin/nim', + '/usr/local/bin/nim' + ], + 'Linux': [ + '/usr/bin/nim', + '/usr/local/bin/nim', + '/opt/nim/bin/nim', + ] + } + # If nim is in the PATH + if is_executable_in_path('nim'): + return 'nim' + os_type = platform.system() + for path in common_paths.get(os_type, []): + if os.path.isfile(path) and os.access(path, os.X_OK): + return path + return None + + +def is_executable_in_path(executable: str): + try: + subprocess.run([executable, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + return True + except FileNotFoundError: + return False + + +def get_nim_version(executable_path: str): + try: + result = subprocess.run([executable_path, '--version'], + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + text=True) + output = result.stdout.split('\n')[0] + parts = output.split() + for p in parts: + if re.match(r'^\d+(\.\d+){1,2}', p): + return version.parse(p) + return None + except Exception as e: + print(f"Error getting Nim version: {e}") + return None + + def create_build_folders(temp_dir, build_id): """ Create local build directories to store Nim source + build artifacts. @@ -485,7 +569,6 @@ async def make_local_modifications(db: Session, redis: Redis, with open(os.path.join(app_dir, filename), "w") as f: f.write(code) - # Write out each scene as scene_{id}.nim for scene in frame.scenes: try: scene_source = write_scene_nim(frame, scene) @@ -498,14 +581,12 @@ async def make_local_modifications(db: Session, redis: Redis, f"({scene.get('id','default')}): {e}") raise - # scenes.nim aggregator with open(os.path.join(source_dir, "src", "scenes", "scenes.nim"), "w") as f: source = write_scenes_nim(frame) f.write(source) if frame.debug: await log(db, redis, int(frame.id), "stdout", f"Generated scenes.nim:\n{source}") - # drivers.nim drivers = drivers_for_frame(frame) with open(os.path.join(source_dir, "src", "drivers", "drivers.nim"), "w") as f: source = write_drivers_nim(drivers) @@ -513,7 +594,7 @@ async def make_local_modifications(db: Session, redis: Redis, if frame.debug: await log(db, redis, int(frame.id), "stdout", f"Generated drivers.nim:\n{source}") - # waveshare driver if needed + # Waveshare driver code (if needed) if drivers.get("waveshare"): with open(os.path.join(source_dir, "src", "drivers", "waveshare", "driver.nim"), "w") as wf: source = write_waveshare_driver_nim(drivers) @@ -522,23 +603,6 @@ async def make_local_modifications(db: Session, redis: Redis, await log(db, redis, int(frame.id), "stdout", f"Generated waveshare driver:\n{source}") -def compile_line_md5(input_str: str) -> str: - """ - Hash of compile command line, ignoring certain flags - (used in caching logic). 
- """ - words = [] - ignore_next = False - for word in input_str.split(' '): - if word == '-I': - ignore_next = True - elif ignore_next or word.startswith("-I"): - pass - else: - words.append(word) - return hashlib.md5(" ".join(words).encode()).hexdigest() - - async def create_local_build_archive( db: Session, redis: Redis, @@ -548,7 +612,9 @@ async def create_local_build_archive( nim_path: str, source_dir: str, temp_dir: str, - cpu: str + cpu: str, + pass_c_l_flags: str = "", + do_cross_compile: bool = False ) -> str: """ Run Nim to generate the C files (and Makefile scaffolding), @@ -556,42 +622,44 @@ async def create_local_build_archive( Returns path to the .tar.gz. """ drivers = drivers_for_frame(frame) - # Copy vendor code if needed + # Copy vendor folder(s) if needed for e.g. Inky if inkyPython := drivers.get('inkyPython'): vendor_folder = inkyPython.vendor_folder or "" os.makedirs(os.path.join(build_dir, "vendor"), exist_ok=True) - shutil.copytree( - f"../frameos/vendor/{vendor_folder}/", - os.path.join(build_dir, "vendor", vendor_folder), - dirs_exist_ok=True - ) + local_from = f"../frameos/vendor/{vendor_folder}/" + shutil.copytree(local_from, + os.path.join(build_dir, "vendor", vendor_folder), + dirs_exist_ok=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "env"), ignore_errors=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "__pycache__"), ignore_errors=True) if inkyHyperPixel2r := drivers.get('inkyHyperPixel2r'): vendor_folder = inkyHyperPixel2r.vendor_folder or "" os.makedirs(os.path.join(build_dir, "vendor"), exist_ok=True) - shutil.copytree( - f"../frameos/vendor/{vendor_folder}/", - os.path.join(build_dir, "vendor", vendor_folder), - dirs_exist_ok=True - ) + local_from = f"../frameos/vendor/{vendor_folder}/" + shutil.copytree(local_from, + os.path.join(build_dir, "vendor", vendor_folder), + dirs_exist_ok=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "env"), ignore_errors=True) shutil.rmtree(os.path.join(build_dir, "vendor", vendor_folder, "__pycache__"), ignore_errors=True) - await log(db, redis, int(frame.id), "stdout", "- Generating source code for compilation.") + await log(db, redis, int(frame.id), "stdout", "- Generating Nim => C code for compilation.") debug_options = "--lineTrace:on" if frame.debug else "" + extra_passes = "" + if pass_c_l_flags: + extra_passes = f'--passC:"{pass_c_l_flags}" --passL:"{pass_c_l_flags}"' + cmd = ( f"cd {source_dir} && nimble assets -y && nimble setup && " f"{nim_path} compile --os:linux --cpu:{cpu} " f"--compileOnly --genScript --nimcache:{build_dir} " - f"{debug_options} src/frameos.nim 2>&1" + f"{debug_options} {extra_passes} src/frameos.nim 2>&1" ) status, out, err = await exec_local_command(db, redis, frame, cmd) if status != 0: - # Attempt to parse the last Nim error line for context + # Attempt to parse any relevant final line for error location lines = (out or "").split("\n") filtered = [ln for ln in lines if ln.strip()] if filtered: @@ -625,7 +693,7 @@ async def create_local_build_archive( raise Exception("nimbase.h not found") shutil.copy(nimbase_path, os.path.join(build_dir, "nimbase.h")) - # If waveshare, copy the variant C/h files + # Waveshare variant? 
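+    # The variant-specific C sources live under src/drivers/waveshare/<variant_folder>/
+    # and are copied next to the generated C files so the Makefile's `ls -S *.c`
+    # picks them up.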
if waveshare := drivers.get('waveshare'): if waveshare.variant: variant_folder = get_variant_folder(waveshare.variant) @@ -635,7 +703,6 @@ async def create_local_build_archive( os.path.join(source_dir, "src", "drivers", "waveshare", variant_folder, uf), os.path.join(build_dir, uf) ) - # color e-paper variants need bc-based filenames # e.g. EPD_2in9b -> EPD_2in9bc.(c/h) if waveshare.variant in [ @@ -651,14 +718,13 @@ async def create_local_build_archive( f"{waveshare.variant}.c", f"{waveshare.variant}.h" ] - for vf in variant_files: shutil.copy( os.path.join(source_dir, "src", "drivers", "waveshare", variant_folder, vf), os.path.join(build_dir, vf) ) - # Generate a Makefile from the Nim-generated compile_frameos.sh + nimc.Makefile template + # Generate the final Makefile with open(os.path.join(build_dir, "Makefile"), "w") as mk: script_path = os.path.join(build_dir, "compile_frameos.sh") linker_flags = ["-pthread", "-lm", "-lrt", "-ldl"] @@ -681,101 +747,40 @@ async def create_local_build_archive( and fl not in ['-o', '-c', '-D'] ] + if do_cross_compile: + if cpu == "arm": + linker_flags += ["-L/usr/lib/arm-linux-gnueabihf"] + compiler_flags += ["-I/usr/include/arm-linux-gnueabihf"] + elif cpu == "arm64": + linker_flags += ["-L/usr/lib/aarch64-linux-gnu"] + compiler_flags += ["-I/usr/include/aarch64-linux-gnu"] + # Base Makefile template with open(os.path.join(source_dir, "tools", "nimc.Makefile"), "r") as mf_in: lines_make = mf_in.readlines() - for ln in lines_make: if ln.startswith("LIBS = "): - ln = "LIBS = -L. " + " ".join(linker_flags) + "\n" + ln = ("LIBS = -L. " + " ".join(linker_flags) + "\n") if ln.startswith("CFLAGS = "): - # remove '-c' if present cf = [f for f in compiler_flags if f != '-c'] ln = "CFLAGS = " + " ".join(cf) + "\n" mk.write(ln) - # Finally, tar up the build directory (which includes .c, .h, Makefile, etc.) + # Make a tar of the entire build_dir archive_path = os.path.join(temp_dir, f"build_{build_id}.tar.gz") zip_base = os.path.join(temp_dir, f"build_{build_id}") shutil.make_archive(zip_base, 'gztar', temp_dir, f"build_{build_id}") return archive_path -def find_nim_executable(): - """ - Try 'nim' in PATH, else guess common install paths on each OS. - """ - common_paths = { - 'Windows': [ - 'C:\\Program Files\\Nim\\bin\\nim.exe', - 'C:\\Nim\\bin\\nim.exe' - ], - 'Darwin': [ - '/opt/homebrew/bin/nim', - '/usr/local/bin/nim' - ], - 'Linux': [ - '/usr/bin/nim', - '/usr/local/bin/nim', - '/opt/nim/bin/nim', - ] - } - if is_executable_in_path('nim'): - return 'nim' - os_type = platform.system() - for path in common_paths.get(os_type, []): - if os.path.isfile(path) and os.access(path, os.X_OK): - return path - return None - - -def is_executable_in_path(executable: str): - """ - Check if `executable` is callable from PATH. - """ - try: - subprocess.run([executable, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) - return True - except FileNotFoundError: - return False - - -def get_nim_version(executable_path: str): - """ - Return a parsed packaging.version.Version for nim --version. - """ - try: - result = subprocess.run([executable_path, '--version'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, - text=True) - # Typically: Nim Compiler Version 2.2.0 [MacOSX: arm64] - output = result.stdout.split('\n')[0] - # e.g. 
"Nim Compiler Version 2.2.0" - parts = output.split() - # The version is usually parts[3], but let's be defensive - for p in parts: - if re.match(r'^\d+(\.\d+){1,2}', p): - return version.parse(p) - return None - except Exception as e: - print(f"Error getting Nim version: {e}") - return None - - def find_nimbase_file(nim_executable: str): - """ - Attempt to find nimbase.h in Nim's lib directory, - scanning possible locations on each OS. - """ nimbase_paths: list[str] = [] - - # Nim 2.x 'nim dump' can yield Nim paths in stderr try: + # Attempt nim dump to see if it reveals the Nim lib location nim_dump_output = subprocess.run( [nim_executable, "dump"], text=True, stdout=subprocess.DEVNULL, stderr=subprocess.PIPE ).stderr - # Collect lines that reference 'lib' nimbase_paths.extend(line for line in nim_dump_output.splitlines() if 'lib' in line) except subprocess.CalledProcessError as e: print(f"Error running 'nim dump': {e}") @@ -789,7 +794,6 @@ def find_nimbase_file(nim_executable: str): elif os_type == 'Windows': nimbase_paths.append('C:\\Nim\\lib') - # Also check Homebrew Cellar for Nim if os_type == 'Darwin': base_dir = '/opt/homebrew/Cellar/nim/' if os.path.exists(base_dir): @@ -798,10 +802,147 @@ def find_nimbase_file(nim_executable: str): if os.path.isfile(nb_file): return nb_file - # See if any path leads to nimbase.h for path in nimbase_paths: nb_file = os.path.join(path, 'nimbase.h') if os.path.isfile(nb_file): return nb_file - return None + + +def requires_vendor_upload(drivers: dict) -> bool: + """ + Returns True if we have inky drivers that require uploading Python code to the Pi. + """ + return any(k in drivers for k in ["inkyPython", "inkyHyperPixel2r"]) + + +def copy_vendor_folders(drivers: dict, vendor_folder_temp: str): + """ + Copies Inky or other vendor folders into a temp area for tar/transfer. + """ + if inkyPython := drivers.get('inkyPython'): + vf = inkyPython.vendor_folder or "" + local_from = f"../frameos/vendor/{vf}/" + dest = os.path.join(vendor_folder_temp, vf) + shutil.copytree(local_from, dest, dirs_exist_ok=True) + # remove venv, __pycache__ to reduce size + shutil.rmtree(os.path.join(dest, "env"), ignore_errors=True) + shutil.rmtree(os.path.join(dest, "__pycache__"), ignore_errors=True) + + if inkyHyperPixel2r := drivers.get('inkyHyperPixel2r'): + vf = inkyHyperPixel2r.vendor_folder or "" + local_from = f"../frameos/vendor/{vf}/" + dest = os.path.join(vendor_folder_temp, vf) + shutil.copytree(local_from, dest, dirs_exist_ok=True) + shutil.rmtree(os.path.join(dest, "env"), ignore_errors=True) + shutil.rmtree(os.path.join(dest, "__pycache__"), ignore_errors=True) + + +async def install_inky_vendors(db: Session, redis: Redis, frame: Frame, ssh, build_id: str, drivers: dict): + """ + If the user wants inky/HyperPixel drivers, set up the Python venv on the Pi. + (We assume the vendor folder was either included in the on-device build tar + or scp'd separately if cross-compiled.) + """ + if inkyPython := drivers.get("inkyPython"): + await install_if_necessary(db, redis, frame, ssh, "python3-pip") + await install_if_necessary(db, redis, frame, ssh, "python3-venv") + cmd = ( + f"cd /srv/frameos/vendor/{inkyPython.vendor_folder} && " + "([ ! 
-d env ] && python3 -m venv env || echo 'env exists') && "
+            "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || "
+            "(echo '> env/bin/pip3 install -r requirements.txt' && "
+            "env/bin/pip3 install -r requirements.txt && "
+            "sha256sum requirements.txt > requirements.txt.sha256sum))"
+        )
+        await exec_command(db, redis, frame, ssh, cmd)
+
+    if inkyHyperPixel2r := drivers.get("inkyHyperPixel2r"):
+        await install_if_necessary(db, redis, frame, ssh, "python3-dev")
+        await install_if_necessary(db, redis, frame, ssh, "python3-pip")
+        await install_if_necessary(db, redis, frame, ssh, "python3-venv")
+        cmd = (
+            f"cd /srv/frameos/vendor/{inkyHyperPixel2r.vendor_folder} && "
+            "([ ! -d env ] && python3 -m venv env || echo 'env exists') && "
+            "(sha256sum -c requirements.txt.sha256sum 2>/dev/null || "
+            "(echo '> env/bin/pip3 install -r requirements.txt' && "
+            "env/bin/pip3 install -r requirements.txt && "
+            "sha256sum requirements.txt > requirements.txt.sha256sum))"
+        )
+        await exec_command(db, redis, frame, ssh, cmd)
+
+
+async def handle_additional_device_config(db: Session, redis: Redis, frame: Frame, ssh, arch: str, drivers: dict):
+    """
+    Apply remaining device configuration: enable I2C/SPI as requested by the
+    drivers, disable the apt-daily timers on low-memory devices, manage the
+    reboot cron job, and append any missing lines to the boot config.
+    """
+    mem_output: list[str] = []
+    await exec_command(db, redis, frame, ssh, "free -m", mem_output, raise_on_error=False)
+    total_memory = 0
+    try:
+        total_memory = int(mem_output[1].split()[1])
+    except (IndexError, ValueError):
+        # Could not parse `free -m` output; total_memory stays 0 and the
+        # device is treated as low-memory below.
+        pass
+    low_memory = (total_memory < 512)
+
+    boot_config = "/boot/config.txt"
+    if await exec_command(db, redis, frame, ssh, "test -f /boot/firmware/config.txt", raise_on_error=False) == 0:
+        boot_config = "/boot/firmware/config.txt"
+
+    # i2c
+    if drivers.get("i2c"):
+        await exec_command(db, redis, frame, ssh,
+                           f'grep -q "^dtparam=i2c_vc=on$" {boot_config} '
+                           f'|| echo "dtparam=i2c_vc=on" | sudo tee -a {boot_config}')
+        await exec_command(db, redis, frame, ssh,
+                           'command -v raspi-config > /dev/null && '
+                           'sudo raspi-config nonint get_i2c | grep -q "1" && { '
+                           '  sudo raspi-config nonint do_i2c 0; echo "I2C enabled"; '
+                           '} || echo "I2C already enabled"')
+
+    # spi
+    if drivers.get("spi"):
+        await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 0')
+    elif drivers.get("noSpi"):
+        await exec_command(db, redis, frame, ssh, 'sudo raspi-config nonint do_spi 1')
+
+    # Possibly disable apt timers on low memory
+    if low_memory:
+        await exec_command(
+            db, redis, frame, ssh,
+            "systemctl is-enabled apt-daily-upgrade.timer 2>/dev/null | grep -q masked || "
+            "("
+            "  sudo systemctl mask apt-daily-upgrade && "
+            "  sudo systemctl mask apt-daily && "
+            "  sudo systemctl disable apt-daily.service apt-daily.timer apt-daily-upgrade.timer apt-daily-upgrade.service"
+            ")"
+        )
+
+    # Reboot or auto-restart logic from frame.reboot
+    if frame.reboot and frame.reboot.get('enabled') == 'true':
+        cron_schedule = frame.reboot.get('crontab', '0 0 * * *')
+        if frame.reboot.get('type') == 'raspberry':
+            crontab = f"{cron_schedule} root /sbin/shutdown -r now"
+        else:
+            crontab = f"{cron_schedule} root systemctl restart frameos.service"
+        await exec_command(db, redis, frame, ssh, f"echo '{crontab}' | sudo tee /etc/cron.d/frameos-reboot")
+    else:
+        await exec_command(db, redis, frame, ssh, "sudo rm -f /etc/cron.d/frameos-reboot")
+
+    # If we have lines to add to /boot/config.txt:
+    if drivers.get("bootconfig"):
+        lines = drivers["bootconfig"].lines
+        must_reboot = False
+        for line in lines:
+            cmd = f'grep -q "^{line}" {boot_config}'
+            if await exec_command(db, redis, frame, ssh, cmd, raise_on_error=False) != 0:
+                # not found in boot_config, so append it
+                await exec_command(
+                    db, redis, frame, ssh,
+                    f'echo "{line}" | sudo tee -a {boot_config}',
+                    log_output=False
+                )
+                must_reboot = True
+        # Record the result on the driver so the main deploy logic can check it:
+        drivers["bootconfig"].needs_reboot = must_reboot

diff --git a/frameos/frameos.nimble b/frameos/frameos.nimble
index ac9979b6..1d45b4ea 100644
--- a/frameos/frameos.nimble
+++ b/frameos/frameos.nimble
@@ -20,6 +20,7 @@ requires "linuxfb >= 0.1.0"
 requires "psutil >= 0.6.0"
 requires "ws >= 0.5.0"
 requires "qrgen >= 3.1.0"
+requires "nimassets >= 0.2.4"
 
 taskRequires "assets", "nimassets >= 0.2.4"
 
diff --git a/frameos/nimble.lock b/frameos/nimble.lock
index f2697147..c29a117c 100644
--- a/frameos/nimble.lock
+++ b/frameos/nimble.lock
@@ -139,6 +139,28 @@
       "sha1": "8e639fafa952f3e9d0315f181aa05e0694603bfc"
     }
   },
+  "zstd": {
+    "version": "0.9.0",
+    "vcsRevision": "f8f80a57ff782f176b16de0b3885600523d39d80",
+    "url": "https://github.com/wltsmrz/nim_zstd",
+    "downloadMethod": "git",
+    "dependencies": [],
+    "checksums": {
+      "sha1": "20b23158e94f01ea0c4bf419a21b0feabe70bf31"
+    }
+  },
+  "nimassets": {
+    "version": "0.2.4",
+    "vcsRevision": "d06724dd7b80fb470542ab932f3a94af78fe2eb1",
+    "url": "https://github.com/xmonader/nimassets",
+    "downloadMethod": "git",
+    "dependencies": [
+      "zstd"
+    ],
+    "checksums": {
+      "sha1": "71d5510ad86a323fc0ad5dc6b774261e80fe0361"
+    }
+  },
   "zippy": {
     "version": "0.10.11",
     "vcsRevision": "9560f3d20479fb390c97f731ef8d100f1ed54e6c",
@@ -201,29 +223,6 @@
     }
   },
   "tasks": {
-    "assets": {
-      "zstd": {
-        "version": "0.9.0",
-        "vcsRevision": "f8f80a57ff782f176b16de0b3885600523d39d80",
-        "url": "https://github.com/wltsmrz/nim_zstd",
-        "downloadMethod": "git",
-        "dependencies": [],
-        "checksums": {
-          "sha1": "20b23158e94f01ea0c4bf419a21b0feabe70bf31"
-        }
-      },
-      "nimassets": {
-        "version": "0.2.4",
-        "vcsRevision": "d06724dd7b80fb470542ab932f3a94af78fe2eb1",
-        "url": "https://github.com/xmonader/nimassets",
-        "downloadMethod": "git",
-        "dependencies": [
-          "zstd"
-        ],
-        "checksums": {
-          "sha1": "71d5510ad86a323fc0ad5dc6b774261e80fe0361"
-        }
-      }
-    }
+    "assets": {}
   }
 }
diff --git a/frameos/tools/nimc.Makefile b/frameos/tools/nimc.Makefile
index 5eee68eb..5464c0ad 100644
--- a/frameos/tools/nimc.Makefile
+++ b/frameos/tools/nimc.Makefile
@@ -5,8 +5,8 @@ SOURCES := $(shell ls -S *.c)
 OBJECTS = $(SOURCES:.c=.o)
 TOTAL = $(words $(SOURCES))
 EXECUTABLE = frameos
-LIBS = -pthread -lm -lm -lrt -ldl
-CFLAGS = -w -fmax-errors=3 -pthread -O3 -fno-strict-aliasing -fno-ident -fno-math-errno
+LIBS = -pthread -lm -lrt -ldl $(SYSROOT)
+CFLAGS = -w -fmax-errors=3 -pthread -O3 -fno-strict-aliasing -fno-ident -fno-math-errno -g
 
 all: $(EXECUTABLE)
 
@@ -19,7 +19,6 @@ clean:
 
 pre-build:
 	@mkdir -p ../cache
-	@echo "Compiling. This might take minutes on the first run."
 
 $(OBJECTS): pre-build
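
Note on the cross-compilation path above: the generated Makefile receives the
sysroot flags through the SYSROOT make variable (appended to LIBS, i.e. at
link time), while the ARM include/library paths for the compile step are baked
into CFLAGS by create_local_build_archive. A minimal sketch of the equivalent
local invocation, assuming a hypothetical sysroot path and build directory
(stand-ins for local_sysroot_dir and build_dir in deploy_frame.py):

    import subprocess

    sysroot = "/tmp/pi-sysroot"  # hypothetical local copy of the Pi's headers + libs
    sysroot_flags = (
        f"--sysroot={sysroot} "
        f"-I{sysroot}/usr/include "
        f"-L{sysroot}/usr/lib/arm-linux-gnueabihf "
    )
    # Passing the arguments as a list avoids the nested shell quoting that the
    # backend's make_cmd string handles with escaped quotes.
    subprocess.run(
        ["make", "-j4", "CC=arm-linux-gnueabihf-gcc", f"SYSROOT={sysroot_flags}"],
        cwd="/tmp/build_1",  # hypothetical stand-in for the generated build dir
        check=True,
    )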