Skip to content

Commit

Permalink
Create Make helpers for running codegen (#2222)
Browse files Browse the repository at this point in the history
The codegen process is a multi-step process that requires compiling,
executing code under simulation, and executing python scripts. To
simplify this workflow, this commit adds Make helper functions for
generating inference source code from a model and creating a binary with
it.

It also updates the hello world example to use these helpers and adds an
update script for keeping the checked in generated source in sync.

BUG=cleanup
  • Loading branch information
rascani authored Sep 21, 2023
1 parent d027f2a commit 1e9b4c5
Show file tree
Hide file tree
Showing 9 changed files with 153 additions and 48 deletions.
18 changes: 7 additions & 11 deletions codegen/examples/hello_world/Makefile.inc
Original file line number Diff line number Diff line change
@@ -1,14 +1,10 @@
# TODO(rjascani): The codegen runtime files (ie, in runtime subdir) should be a
# separate library.
CODEGEN_HELLO_WORLD_SRCS := \
$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world.cc \
$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world_model.cc \
$(TENSORFLOW_ROOT)codegen/runtime/micro_codegen_context.cc
CODEGEN_HELLO_WORLD_MODEL := \
$(TENSORFLOW_ROOT)tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite

CODEGEN_HELLO_WORLD_HDRS := \
$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world_model.h \
$(TENSORFLOW_ROOT)codegen/runtime/micro_codegen_context.h
CODEGEN_HELLO_WORLD_SRCS := \
$(TENSORFLOW_ROOT)codegen/examples/hello_world/hello_world.cc

# Builds a standalone binary.
$(eval $(call microlite_test,codegen_hello_world,\
$(CODEGEN_HELLO_WORLD_SRCS),,))
$(eval $(call codegen_model_binary,codegen_hello_world,hello_world_model,\
$(CODEGEN_HELLO_WORLD_MODEL),$(CODEGEN_HELLO_WORLD_SRCS),,))

47 changes: 12 additions & 35 deletions codegen/examples/hello_world/README.md
Original file line number Diff line number Diff line change
@@ -1,51 +1,28 @@
# Codegen Hello World Example

This is a code-generated example of the hello world model. The process is
currently somewhat involved:
This is a code-generated example of the hello world model. The generated source
is checked in for now so that it can be reviewed during the prototyping stage.

## Build the preprocessor for your target
## Building the example executable
Please note that this will execute Bazel from make as part of the process.

This creates a target-specific preprocessor binary capable of performing the
init and prepare stages of the Interpreter and serializing the output. This
binary can be re-used for multiple models.

### x86
```
make -f tensorflow/lite/micro/tools/make/Makefile codegen_preprocessor
make -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
```

## Run the preprocessor
## Running the example

The preprocessor will take the provided model, create a TFLM Interpreter, and
allocate tensors. It will then capture and serialize the resulting data
structures needed for inference. For embedded targets, this should be run under
simulation.
TODO(rjascani): The command works, but it'll just crash as we don't have all of
the data structures fully populated yet.

### x86
```
./gen/linux_x86_64_default/bin/codegen_preprocessor \
$(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
$(pwd)/gen/linux_86_64_default/genfiles/hello_world_int8.ppd
make -f tensorflow/lite/micro/tools/make/Makefile run_codegen_hello_world
```

## Generate the inference code
## Updating the generated sources
To update the generated source, you can execute this make target:

To generate the inference code at `codegen/example/hello_world_model.h/.cc`:

### x86
```
bazel run codegen:code_generator -- \
--model $(pwd)/tensorflow/lite/micro/examples/hello_world/models/hello_world_int8.tflite \
--preprocessed_data $(pwd)/gen/linux_86_64_default/genfiles/hello_world_int8.ppd \
--output_dir $(pwd)/codegen/examples/hello_world \
--output_name hello_world_model
./codegen/examples/hello_world/update_example_source.sh
```

## Compile the generated inference code

To compile the generated source, you can use the Makefile:

### x86
```
make -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world
```
29 changes: 29 additions & 0 deletions codegen/examples/hello_world/update_example_source.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,29 @@
#!/usr/bin/env bash
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

#
# Syncs the generated example source code in the repository.
#

set -e

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
ROOT_DIR="${SCRIPT_DIR}/../../.."
cd "${ROOT_DIR}"

# Building the example target also runs the codegen preprocessor and code
# generator, producing hello_world_model.h/.cc under the gen directory.
make -j8 -f tensorflow/lite/micro/tools/make/Makefile codegen_hello_world

# Copy the freshly generated sources over the checked-in copies. Quote the
# destination so paths containing spaces don't word-split (ShellCheck SC2086).
GEN_DIR=./gen/linux_x86_64_default/genfiles
cp "${GEN_DIR}/hello_world_model.h" "${SCRIPT_DIR}"
cp "${GEN_DIR}/hello_world_model.cc" "${SCRIPT_DIR}"
12 changes: 10 additions & 2 deletions tensorflow/lite/micro/tools/make/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,10 @@ endif
TENSORFLOW_ROOT :=
RELATIVE_MAKEFILE_DIR := tensorflow/lite/micro/tools/make
MAKEFILE_DIR := $(TENSORFLOW_ROOT)$(RELATIVE_MAKEFILE_DIR)
BAZEL_ROOT := $(TENSORFLOW_ROOT)
ifeq ($(BAZEL_ROOT),)
BAZEL_ROOT = .
endif

# Pull in some convenience functions.
include $(MAKEFILE_DIR)/helper_functions.inc
Expand Down Expand Up @@ -294,6 +298,8 @@ MICRO_LITE_BENCHMARKS := $(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tool
MICROLITE_BENCHMARK_SRCS := \
$(wildcard $(TENSORFLOW_ROOT)tensorflow/lite/micro/tools/benchmarking/*benchmark.cc)

CODEGEN_PREPROCESSOR_PATH := $(BINDIR)codegen_preprocessor

MICRO_LITE_CODEGEN_PREPROCESSOR := $(TENSORFLOW_ROOT)codegen/preprocessor/Makefile.inc

MICRO_LITE_CODEGEN_EXAMPLES := $(shell find $(TENSORFLOW_ROOT)codegen/examples/ -name Makefile.inc)
Expand Down Expand Up @@ -552,7 +558,11 @@ include $(MAKEFILE_DIR)/additional_kernels.inc
MICROLITE_CC_SRCS := $(filter-out $(MICROLITE_TEST_SRCS), $(MICROLITE_CC_BASE_SRCS))
MICROLITE_CC_SRCS := $(filter-out $(MICROLITE_BENCHMARK_SRCS), $(MICROLITE_CC_SRCS))

CODEGEN_RUNTIME_CC_SRCS := \
$(TENSORFLOW_ROOT)codegen/runtime/micro_codegen_context.cc

CODEGEN_RUNTIME_CC_HDRS := \
$(TENSORFLOW_ROOT)codegen/runtime/micro_codegen_context.h

# The download scripts require that the downloads directory already exist for
# improved error checking. To accommodate that, we first create a downloads
Expand Down Expand Up @@ -868,8 +878,6 @@ integration_tests: $(MICROLITE_INTEGRATION_TEST_TARGETS)
generated_micro_mutable_op_resolver: $(MICROLITE_GEN_OP_RESOLVER_TEST_TARGETS)
endif

CODEGEN_PREPROCESSOR_PATH := $(BINDIR)codegen_preprocessor

codegen_preprocessor: $(CODEGEN_PREPROCESSOR_PATH)

$(CODEGEN_PREPROCESSOR_PATH): $(CODEGEN_PREPROCESSOR_SRCS) $(MICROLITE_LIB_PATH)
Expand Down
91 changes: 91 additions & 0 deletions tensorflow/lite/micro/tools/make/helper_functions.inc
Original file line number Diff line number Diff line change
Expand Up @@ -117,3 +117,94 @@ endef
# 2 - File pattern, e.g: *.h
recursive_find = $(wildcard $(1)$(2)) $(foreach dir,$(wildcard $(1)*),$(call recursive_find,$(dir)/,$(2)))

# Generates code capable of performing inference without an interpreter. It runs
# the codegen preprocessor and the code generator.
#
# Arguments are:
# 1 - Name of target
# 2 - Generated source basename
# 3 - Model
# Calling eval on the output will create the targets that you need.
define codegen_model
# Filter out targets that currently don't support codegen:
# Bluepill: Is compiled with nostdlib, but preprocessor uses standard library.
# RISC-V: TODO(b/300484340): qemu-riscv32 currently does not support semi-
# hosting, which prevents the preprocessor from opening a file on the
# host filesystem.
# Hexagon: TODO(b/300322637): The hexagon 3.5.1 SDK doesn't provide a working
# C++11 stdlib, so preprocessor fails to link.
ifneq ($(TARGET), $(filter $(TARGET), bluepill riscv32_generic hexagon))

# Per-target-name variables: the input model and the preprocessed-data (.ppd)
# file the preprocessor will write into the generated-sources directory.
$(1)_MODEL := $(3)
$(1)_PREPROCESSOR_OUTPUT := $(GENERATED_SRCS_DIR)$(2).ppd

# Paths of the .cc/.h pair the code generator emits; callers (e.g.
# codegen_model_binary) read these variables after $(call)-ing this macro.
$(1)_GENERATED_SRCS := $(GENERATED_SRCS_DIR)$(2).cc
$(1)_GENERATED_HDRS := $(GENERATED_SRCS_DIR)$(2).h

# Run the compiled preprocessor on the model to capture/serialize the
# interpreter's init+prepare state. RUN_COMMAND is the target's simulator
# (qemu, xt-run, hexagon-sim, ...) and is empty for native x86 builds.
$$($(1)_PREPROCESSOR_OUTPUT): $(CODEGEN_PREPROCESSOR_PATH) $$($(1)_MODEL)
@mkdir -p $$(dir $$@)
$$(RUN_COMMAND) $(CODEGEN_PREPROCESSOR_PATH) \
$(abspath $$($(1)_MODEL)) $(abspath $$($(1)_PREPROCESSOR_OUTPUT))

# Invoke the Bazel-built code generator on the model plus the preprocessed
# data to emit the generated inference sources. abspath is required because
# the command cd's into BAZEL_ROOT before running.
$$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS): $$($(1)_MODEL) $$($(1)_PREPROCESSOR_OUTPUT)
cd $(BAZEL_ROOT) && bazel run //codegen:code_generator -- \
--model $(abspath $$($(1)_MODEL)) \
--preprocessed_data $(abspath $$($(1)_PREPROCESSOR_OUTPUT)) \
--output_dir $(abspath $(GENERATED_SRCS_DIR)) --output_name $(2)

# Convenience phony-style target: `make <name>` produces the generated sources.
$(1): $$($(1)_GENERATED_SRCS) $$($(1)_GENERATED_HDRS)

endif
endef # codegen_model

# Generates and compiles code capable of performing inference without an
# interpreter.
#
# Users can use `make run_<target>` to execute the binary in the appropriate
# simulator.
#
# Arguments are:
# 1 - Name of target
# 2 - Generated source basename
# 3 - Model
# 4 - C/C++ source files
# 5 - C/C++ header files
# Calling eval on the output will create the targets that you need.
define codegen_model_binary
# Filter out targets that currently don't support codegen:
# Bluepill: Is compiled with nostdlib, but preprocessor uses standard library.
# RISC-V: TODO(b/300484340): qemu-riscv32 currently does not support semi-
# hosting, which prevents the preprocessor from opening a file on the
# host filesystem.
# Hexagon: TODO(b/300322637): The hexagon 3.5.1 SDK doesn't provide a working
# C++11 stdlib, so preprocessor fails to link.
ifneq ($(TARGET), $(filter $(TARGET), bluepill riscv32_generic hexagon))

# Start from the caller's sources/headers plus the shared codegen runtime.
$(1)_CODEGEN_SRCS := $(4) $$(CODEGEN_RUNTIME_CC_SRCS)
$(1)_CODEGEN_HDRS := $(5) $$(CODEGEN_RUNTIME_CC_HDRS)

# Instantiate codegen_model under the "<name>_codegen" prefix; it defines the
# <name>_codegen_GENERATED_SRCS/HDRS variables and generation rules used below.
$(call codegen_model,$(1)_codegen,$(2),$(3))

$(1)_CODEGEN_SRCS += $$($(1)_codegen_GENERATED_SRCS)
$(1)_CODEGEN_HDRS += $$($(1)_codegen_GENERATED_HDRS)

# Map each .c/.cc/.S source to its object file under the core object dir.
$(1)_CODEGEN_OBJS := $$(addprefix $$(CORE_OBJDIR), \
$$(patsubst %.S,%.o,$$(patsubst %.cc,%.o,$$(patsubst %.c,%.o,$$($(1)_CODEGEN_SRCS)))))

# Link the standalone binary against the TFLM static library.
$(1)_BINARY := $$(BINDIR)$(1)
$$($(1)_BINARY): $$($(1)_CODEGEN_OBJS) $$(MICROLITE_LIB_PATH)
@mkdir -p $$(dir $$@)
$$(CXX) $$(CXXFLAGS) $$(INCLUDES) \
-o $$($(1)_BINARY) $$($(1)_CODEGEN_OBJS) \
$$(MICROLITE_LIB_PATH) $$(LDFLAGS) $$(MICROLITE_LIBS)

# `make <name>` builds the ELF; `make <name>_bin` builds the raw .bin image.
$(1): $$($(1)_BINARY)
$(1)_bin: $$($(1)_BINARY).bin

MICROLITE_BUILD_TARGETS += $$($(1)_BINARY)

# `make run_<name>` executes the binary under the target's simulator
# (RUN_COMMAND is empty for native builds, so it runs directly).
run_$(1): $$($(1)_BINARY)
$$(RUN_COMMAND) $$($(1)_BINARY)

endif
endef # codegen_model_binary
Original file line number Diff line number Diff line change
Expand Up @@ -44,3 +44,4 @@ EXCLUDED_TESTS := \
MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))

TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_qemu.sh arm $(TARGET_ARCH)
RUN_COMMAND := qemu-arm -cpu $(TARGET_ARCH)
Original file line number Diff line number Diff line change
Expand Up @@ -114,3 +114,4 @@ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))

TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_hexagon_binary.sh
SIZE_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/size_hexagon_binary.sh
RUN_COMMAND := hexagon-sim
Original file line number Diff line number Diff line change
Expand Up @@ -45,4 +45,5 @@ MICROLITE_TEST_SRCS := $(filter-out $(EXCLUDED_TESTS), $(MICROLITE_TEST_SRCS))
LDFLAGS += -mno-relax
TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_with_qemu.sh riscv32 rv32
SIZE_SCRIPT := ${TENSORFLOW_ROOT}tensorflow/lite/micro/testing/size_riscv32_binary.sh
RUN_COMMAND := qemu-riscv32 -cpu rv32

Original file line number Diff line number Diff line change
Expand Up @@ -72,6 +72,7 @@ CXXFLAGS += $(XTENSA_EXTRA_CFLAGS)

TEST_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/test_xtensa_binary.sh
SIZE_SCRIPT := $(TENSORFLOW_ROOT)tensorflow/lite/micro/testing/size_xtensa_binary.sh
RUN_COMMAND := xt-run

# TODO(b/158651472): Fix the memory_arena_threshold_test
# TODO(b/174707181): Fix the micro_interpreter_test
Expand Down

0 comments on commit 1e9b4c5

Please sign in to comment.