Revert Tinygrad (#34243)
* Revert "dmonitoringmodeld: use cl transform (#34235)"

This reverts commit 684b0b9.

* Revert "load model before calling convert_fp16_to_fp32"

This reverts commit 31606a7.

* Revert "bump tinygrad"

This reverts commit 44f58ff.

* Revert "Tinygrad runner (#34171)"

This reverts commit 7b5a4fb.

* Allow init buffer

* typo
haraschax authored Dec 14, 2024
1 parent 4c27878 commit f6885dc
Showing 39 changed files with 1,547 additions and 368 deletions.
9 changes: 1 addition & 8 deletions common/transformations/model.py
@@ -1,7 +1,7 @@
import numpy as np

from openpilot.common.transformations.orientation import rot_from_euler
from openpilot.common.transformations.camera import get_view_frame_from_calib_frame, view_frame_from_device_frame, _ar_ox_fisheye
from openpilot.common.transformations.camera import get_view_frame_from_calib_frame, view_frame_from_device_frame

# segnet
SEGNET_SIZE = (512, 384)
@@ -39,13 +39,6 @@
[0.0, sbigmodel_fl, 0.5 * (256 + MEDMODEL_CY)],
[0.0, 0.0, 1.0]])

DM_INPUT_SIZE = (1440, 960)
dmonitoringmodel_fl = _ar_ox_fisheye.focal_length
dmonitoringmodel_intrinsics = np.array([
[dmonitoringmodel_fl, 0.0, DM_INPUT_SIZE[0]/2],
[0.0, dmonitoringmodel_fl, DM_INPUT_SIZE[1]/2 - (_ar_ox_fisheye.height - DM_INPUT_SIZE[1])/2],
[0.0, 0.0, 1.0]])

bigmodel_frame_from_calib_frame = np.dot(bigmodel_intrinsics,
get_view_frame_from_calib_frame(0, 0, 0, 0))

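For context on this hunk: the deleted DM_INPUT_SIZE and dmonitoringmodel_intrinsics block only fed the cl-transform path that this same commit reverts in dmonitoringmodeld.py (see the last file below), where it was combined with the camera intrinsics to build the warp matrix. A minimal standalone sketch of that now-removed derivation, assuming the openpilot package is importable and that _ar_ox_fisheye exposes focal_length, height and intrinsics the way the removed lines use them:

import numpy as np
from openpilot.common.transformations.camera import _ar_ox_fisheye

# Values as they stood before this revert (see the removed lines above).
DM_INPUT_SIZE = (1440, 960)
fl = _ar_ox_fisheye.focal_length
dm_intrinsics = np.array([
    [fl, 0.0, DM_INPUT_SIZE[0] / 2],
    [0.0, fl, DM_INPUT_SIZE[1] / 2 - (_ar_ox_fisheye.height - DM_INPUT_SIZE[1]) / 2],
    [0.0, 0.0, 1.0]])

# The reverted dmonitoringmodeld.py inverted (model intrinsics @ camera intrinsics^-1)
# to get a warp from model-input pixels back to camera pixels for the CL transform.
model_transform = np.linalg.inv(dm_intrinsics @ np.linalg.inv(_ar_ox_fisheye.intrinsics)).astype(np.float32)
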
3 changes: 2 additions & 1 deletion pyproject.toml
@@ -42,7 +42,8 @@ dependencies = [

# modeld
"onnx >= 1.14.0",
"onnxruntime >=1.16.3",
"onnxruntime >=1.16.3; platform_system == 'Linux' and platform_machine == 'aarch64'",
"onnxruntime-gpu >=1.16.3; platform_system == 'Linux' and platform_machine == 'x86_64'",

# logging
"pyzmq",
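The restored dependency split uses PEP 508 environment markers, so a given install resolves exactly one onnxruntime flavor: the CPU build on the aarch64 device, the GPU build on x86_64 PCs, and neither elsewhere. A quick way to see which marker matches the current machine, as a sketch assuming the third-party packaging library is available (it is not pulled in by this diff):

import platform
from packaging.markers import Marker

on_device = Marker("platform_system == 'Linux' and platform_machine == 'aarch64'")
on_pc = Marker("platform_system == 'Linux' and platform_machine == 'x86_64'")
print(platform.system(), platform.machine())
print("onnxruntime:", on_device.evaluate(), "| onnxruntime-gpu:", on_pc.evaluate())
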
2 changes: 1 addition & 1 deletion release/release_files.py
@@ -54,7 +54,7 @@
"tools/joystick/",
"tools/longitudinal_maneuvers/",

"tinygrad_repo/examples/openpilot/compile3.py",
"tinygrad_repo/openpilot/compile2.py",
"tinygrad_repo/extra/onnx.py",
"tinygrad_repo/extra/onnx_ops.py",
"tinygrad_repo/extra/thneed.py",
43 changes: 32 additions & 11 deletions selfdrive/modeld/SConscript
@@ -13,6 +13,20 @@ common_src = [
"transforms/transform.cc",
]

thneed_src_common = [
"thneed/thneed_common.cc",
"thneed/serialize.cc",
]

thneed_src_qcom = thneed_src_common + ["thneed/thneed_qcom2.cc"]
thneed_src_pc = thneed_src_common + ["thneed/thneed_pc.cc"]
thneed_src = thneed_src_qcom if arch == "larch64" else thneed_src_pc

# SNPE except on Mac and ARM Linux
snpe_lib = []
if arch != "Darwin" and arch != "aarch64":
common_src += ['runners/snpemodel.cc']
snpe_lib += ['SNPE']

# OpenCL is a framework on Mac
if arch == "Darwin":
@@ -31,7 +45,11 @@ snpe_rpath_pc = f"{Dir('#').abspath}/third_party/snpe/x86_64-linux-clang"
snpe_rpath = lenvCython['RPATH'] + [snpe_rpath_qcom if arch == "larch64" else snpe_rpath_pc]

cython_libs = envCython["LIBS"] + libs
snpemodel_lib = lenv.Library('snpemodel', ['runners/snpemodel.cc'])
commonmodel_lib = lenv.Library('commonmodel', common_src)

lenvCython.Program('runners/runmodel_pyx.so', 'runners/runmodel_pyx.pyx', LIBS=cython_libs, FRAMEWORKS=frameworks)
lenvCython.Program('runners/snpemodel_pyx.so', 'runners/snpemodel_pyx.pyx', LIBS=[snpemodel_lib, snpe_lib, *cython_libs], FRAMEWORKS=frameworks, RPATH=snpe_rpath)
lenvCython.Program('models/commonmodel_pyx.so', 'models/commonmodel_pyx.pyx', LIBS=[commonmodel_lib, *cython_libs], FRAMEWORKS=frameworks)

tinygrad_files = ["#"+x for x in glob.glob(env.Dir("#tinygrad_repo").relpath + "/**", recursive=True, root_dir=env.Dir("#").abspath)]
@@ -41,17 +59,20 @@ fn = File("models/supercombo").abspath
cmd = f'python3 {Dir("#selfdrive/modeld").abspath}/get_model_metadata.py {fn}.onnx'
lenv.Command(fn + "_metadata.pkl", [fn + ".onnx"] + tinygrad_files, cmd)

# Compile tinygrad model
# TODO this is all super hacky
# Build thneed model
if arch == "larch64" or GetOption('pc_thneed'):
tinygrad_opts = []
if not GetOption('pc_thneed'):
# use FLOAT16 on device for speed + don't cache the CL kernels for space
tinygrad_opts += ["FLOAT16=1", "PYOPENCL_NO_CACHE=1"]
cmd = f"cd {Dir('#').abspath}/tinygrad_repo && " + ' '.join(tinygrad_opts) + f" python3 openpilot/compile2.py {fn}.onnx {fn}.thneed"

pythonpath_string = 'PYTHONPATH="${PYTHONPATH}:' + env.Dir("#tinygrad_repo").abspath + '"'
if arch == 'larch64':
device_string = 'QCOM=1'
else:
device_string = 'CLANG=1 IMAGE=0'
lenv.Command(fn + ".thneed", [fn + ".onnx"] + tinygrad_files, cmd)

for model_name in ['supercombo', 'dmonitoring_model']:
fn = File(f"models/{model_name}").abspath
cmd = f'{pythonpath_string} {device_string} python3 {Dir("#tinygrad_repo").abspath}/examples/openpilot/compile3.py {fn}.onnx {fn}_tinygrad.pkl'
lenv.Command(fn + "_tinygrad.pkl", [fn + ".onnx"] + tinygrad_files, cmd)
fn_dm = File("models/dmonitoring_model").abspath
cmd = f"cd {Dir('#').abspath}/tinygrad_repo && " + ' '.join(tinygrad_opts) + f" python3 openpilot/compile2.py {fn_dm}.onnx {fn_dm}.thneed"
lenv.Command(fn_dm + ".thneed", [fn_dm + ".onnx"] + tinygrad_files, cmd)

thneed_lib = env.SharedLibrary('thneed', thneed_src, LIBS=[gpucommon, common, 'OpenCL', 'dl'])
thneedmodel_lib = env.Library('thneedmodel', ['runners/thneedmodel.cc'])
lenvCython.Program('runners/thneedmodel_pyx.so', 'runners/thneedmodel_pyx.pyx', LIBS=envCython["LIBS"]+[thneedmodel_lib, thneed_lib, gpucommon, common, 'dl', 'OpenCL'])
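Not part of the diff, but for orientation: each lenv.Command above turns a model's .onnx into a .thneed via tinygrad's compile2.py, with FLOAT16=1 and PYOPENCL_NO_CACHE=1 applied only to the on-device (larch64) build, per the comment in the hunk. Roughly the manual equivalent, as a sketch assuming it is run from an openpilot checkout root with tinygrad_repo present:

import os
import subprocess

fn = os.path.abspath("selfdrive/modeld/models/dmonitoring_model")
env = dict(os.environ, FLOAT16="1", PYOPENCL_NO_CACHE="1")  # device-only flags (omitted for the pc_thneed build)
subprocess.check_call(
    ["python3", "openpilot/compile2.py", f"{fn}.onnx", f"{fn}.thneed"],
    cwd="tinygrad_repo", env=env)
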
6 changes: 6 additions & 0 deletions selfdrive/modeld/dmonitoringmodeld
@@ -1,4 +1,10 @@
#!/usr/bin/env bash

DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null && pwd)"
cd "$DIR/../../"

if [ -f "$DIR/libthneed.so" ]; then
export LD_PRELOAD="$DIR/libthneed.so"
fi

exec "$DIR/dmonitoringmodeld.py" "$@"
75 changes: 26 additions & 49 deletions selfdrive/modeld/dmonitoringmodeld.py
@@ -1,18 +1,8 @@
#!/usr/bin/env python3
import os
from openpilot.system.hardware import TICI
## TODO this is hack
if TICI:
from tinygrad.tensor import Tensor
from tinygrad.dtype import dtypes
from openpilot.selfdrive.modeld.runners.tinygrad_helpers import qcom_tensor_from_opencl_address
os.environ['QCOM'] = '1'
else:
from openpilot.selfdrive.modeld.runners.ort_helpers import make_onnx_cpu_runner
import gc
import math
import time
import pickle
import ctypes
import numpy as np
from pathlib import Path
@@ -23,20 +13,21 @@
from msgq.visionipc import VisionIpcClient, VisionStreamType, VisionBuf
from openpilot.common.swaglog import cloudlog
from openpilot.common.realtime import set_realtime_priority
from openpilot.common.transformations.model import dmonitoringmodel_intrinsics, DM_INPUT_SIZE
from openpilot.common.transformations.camera import _ar_ox_fisheye, _os_fisheye
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext, MonitoringModelFrame
from openpilot.selfdrive.modeld.runners import ModelRunner, Runtime
from openpilot.selfdrive.modeld.models.commonmodel_pyx import CLContext
from openpilot.selfdrive.modeld.parse_model_outputs import sigmoid

MODEL_WIDTH, MODEL_HEIGHT = DM_INPUT_SIZE
CALIB_LEN = 3
MODEL_WIDTH = 1440
MODEL_HEIGHT = 960
FEATURE_LEN = 512
OUTPUT_SIZE = 84 + FEATURE_LEN

PROCESS_NAME = "selfdrive.modeld.dmonitoringmodeld"
SEND_RAW_PRED = os.getenv('SEND_RAW_PRED')
MODEL_PATH = Path(__file__).parent / 'models/dmonitoring_model.onnx'
MODEL_PKL_PATH = Path(__file__).parent / 'models/dmonitoring_model_tinygrad.pkl'
MODEL_PATHS = {
ModelRunner.THNEED: Path(__file__).parent / 'models/dmonitoring_model.thneed',
ModelRunner.ONNX: Path(__file__).parent / 'models/dmonitoring_model.onnx'}

class DriverStateResult(ctypes.Structure):
_fields_ = [
@@ -67,42 +58,33 @@ class DMonitoringModelResult(ctypes.Structure):
class ModelState:
inputs: dict[str, np.ndarray]
output: np.ndarray
model: ModelRunner

def __init__(self, cl_ctx):
assert ctypes.sizeof(DMonitoringModelResult) == OUTPUT_SIZE * ctypes.sizeof(ctypes.c_float)
self.output = np.zeros(OUTPUT_SIZE, dtype=np.float32)
self.inputs = {
'input_img': np.zeros(MODEL_HEIGHT * MODEL_WIDTH, dtype=np.uint8),
'calib': np.zeros(CALIB_LEN, dtype=np.float32)}

self.frame = MonitoringModelFrame(cl_ctx)
self.numpy_inputs = {
'calib': np.zeros((1, CALIB_LEN), dtype=np.float32),
}
self.model = ModelRunner(MODEL_PATHS, self.output, Runtime.GPU, False, cl_ctx)
self.model.addInput("input_img", None)
self.model.addInput("calib", self.inputs['calib'])

if TICI:
self.tensor_inputs = {k: Tensor(v, device='NPY').realize() for k,v in self.numpy_inputs.items()}
with open(MODEL_PKL_PATH, "rb") as f:
self.model_run = pickle.load(f)
else:
self.onnx_cpu_runner = make_onnx_cpu_runner(MODEL_PATH)
def run(self, buf:VisionBuf, calib:np.ndarray) -> tuple[np.ndarray, float]:
self.inputs['calib'][:] = calib

def run(self, buf:VisionBuf, calib:np.ndarray, transform:np.ndarray) -> tuple[np.ndarray, float]:
self.numpy_inputs['calib'][0,:] = calib
v_offset = buf.height - MODEL_HEIGHT
h_offset = (buf.width - MODEL_WIDTH) // 2
buf_data = buf.data.reshape(-1, buf.stride)
input_data = self.inputs['input_img'].reshape(MODEL_HEIGHT, MODEL_WIDTH)
input_data[:] = buf_data[v_offset:v_offset+MODEL_HEIGHT, h_offset:h_offset+MODEL_WIDTH]

self.model.setInputBuffer("input_img", self.inputs['input_img'].view(np.float32))
t1 = time.perf_counter()

input_img_cl = self.frame.prepare(buf, transform.flatten())
if TICI:
# The imgs tensors are backed by opencl memory, only need init once
if 'input_img' not in self.tensor_inputs:
self.tensor_inputs['input_img'] = qcom_tensor_from_opencl_address(input_img_cl.mem_address, (1, MODEL_WIDTH*MODEL_HEIGHT), dtype=dtypes.uint8)
else:
self.numpy_inputs['input_img'] = self.frame.buffer_from_cl(input_img_cl).reshape((1, MODEL_WIDTH*MODEL_HEIGHT))

if TICI:
output = self.model_run(**self.tensor_inputs).numpy().flatten()
else:
output = self.onnx_cpu_runner.run(None, self.numpy_inputs)[0].flatten()

self.model.execute()
t2 = time.perf_counter()
return output, t2 - t1
return self.output, t2 - t1


def fill_driver_state(msg, ds_result: DriverStateResult):
@@ -155,23 +137,18 @@ def main():
pm = PubMaster(["driverStateV2"])

calib = np.zeros(CALIB_LEN, dtype=np.float32)
model_transform = None

while True:
buf = vipc_client.recv()
if buf is None:
continue

if model_transform is None:
cam = _os_fisheye if buf.width == _os_fisheye.width else _ar_ox_fisheye
model_transform = np.linalg.inv(np.dot(dmonitoringmodel_intrinsics, np.linalg.inv(cam.intrinsics))).astype(np.float32)

sm.update(0)
if sm.updated["liveCalibration"]:
calib[:] = np.array(sm["liveCalibration"].rpyCalib)

t1 = time.perf_counter()
model_output, gpu_execution_time = model.run(buf, calib, model_transform)
model_output, gpu_execution_time = model.run(buf, calib)
t2 = time.perf_counter()

pm.send("driverStateV2", get_driverstate_packet(model_output, vipc_client.frame_id, vipc_client.timestamp_sof, t2 - t1, gpu_execution_time))
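One more orientation note on the restored ModelState.run above (not part of the diff): the model input is a raw 1440x960 uint8 window cut from the camera's luma plane, taking the bottom rows and centering horizontally. The worked offsets below assume a 1928x1208 ar/ox fisheye buffer, which is an assumption here; the real dimensions come from the VisionIpc buffer at runtime:

buf_width, buf_height = 1928, 1208           # assumed ar/ox fisheye resolution
MODEL_WIDTH, MODEL_HEIGHT = 1440, 960
v_offset = buf_height - MODEL_HEIGHT         # 248 -> keep the bottom 960 rows
h_offset = (buf_width - MODEL_WIDTH) // 2    # 244 -> centered 1440-column window
assert (v_offset, h_offset) == (248, 244)
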
(diffs for the remaining changed files not shown)
