2024-11-10 nightly release (5b51bb8)
pytorchbot committed Nov 10, 2024
1 parent b9b434d · commit 6b9cc29
Showing 11 changed files with 271 additions and 102 deletions.
11 changes: 8 additions & 3 deletions CMakeLists.txt
@@ -721,10 +721,15 @@ if(EXECUTORCH_BUILD_PYBIND)
      -fPIC
      -frtti
      -fexceptions
      # libtorch is built with the old ABI, so we need to do the same for any
      # .cpp files that include torch, c10, or ATen targets.
      -D_GLIBCXX_USE_CXX11_ABI=0
  )
  if(EXECUTORCH_DO_NOT_USE_CXX11_ABI)
    # libtorch is built with the old ABI, so we need to do the same for any
    # .cpp files that include torch, c10, or ATen targets. Note that the
    # PyTorch nightly binary is built with _GLIBCXX_USE_CXX11_ABI set to 0,
    # while its CI build sets it to 1 (the default).
    list(APPEND _pybind_compile_options -D_GLIBCXX_USE_CXX11_ABI=0)
  endif()

# util lib
add_library(
util ${CMAKE_CURRENT_SOURCE_DIR}/extension/evalue_util/print_evalue.cpp
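Note: whether this flag is needed depends on how the installed libtorch was compiled. As a quick check — a sketch, assuming a Python environment where torch is importable — PyTorch's compiled_with_cxx11_abi() helper reports which ABI libtorch was built with:

# Sketch: decide whether the pybind build needs the old-ABI define by
# checking how the installed libtorch was compiled.
import torch

if torch.compiled_with_cxx11_abi():
    # libtorch was built with _GLIBCXX_USE_CXX11_ABI=1 (the new ABI);
    # EXECUTORCH_DO_NOT_USE_CXX11_ABI can stay off.
    print("new ABI: leave EXECUTORCH_DO_NOT_USE_CXX11_ABI off")
else:
    # libtorch was built with _GLIBCXX_USE_CXX11_ABI=0, as PyTorch nightly
    # binaries are; configure with -DEXECUTORCH_DO_NOT_USE_CXX11_ABI=ON.
    print("old ABI: configure with -DEXECUTORCH_DO_NOT_USE_CXX11_ABI=ON")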
6 changes: 5 additions & 1 deletion exir/pass_base.py
@@ -318,7 +318,11 @@ def call_function(
        if target == operator.getitem:
            value, key = args
            return self.callback.call_getitem(value, key, meta)
        elif getattr(target, "__module__", None) in {"_operator", "math"}:
        elif getattr(target, "__module__", None) in {
            "_operator",
            "builtins",
            "math",
        }:
            assert callable(target)
            return self.callback.call_sym(target, args, meta)
        elif target in _TORCH_SYM_OPS:
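Note: the new "builtins" entry works because the pass routes symbolic scalar calls by the target callable's __module__ attribute. A minimal illustration in plain Python (no ExecuTorch required):

# Built-in scalar functions such as round() report "builtins", while
# operator and math functions report "_operator" and "math" — the three
# module names the pass now accepts for call_sym dispatch.
import math
import operator

print(round.__module__)         # builtins
print(operator.add.__module__)  # _operator
print(math.ceil.__module__)     # math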
2 changes: 1 addition & 1 deletion exir/passes/__init__.py
@@ -339,7 +339,7 @@ def get_submodule(node: torch.fx.Node) -> torch.fx.GraphModule:
                self.call(get_submodule(node.args[0]))
                self.call(get_submodule(node.args[1]))
                continue
            elif getattr(target, "__module__", None) == "_operator":
            elif getattr(target, "__module__", None) in ("builtins", "_operator"):
                continue
            elif target in to_out_var_skiplist:
                continue
17 changes: 15 additions & 2 deletions exir/passes/executorch_prim_ops_registry.py
@@ -4,9 +4,10 @@
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

import builtins
import math
import operator
from typing import Dict, Set, Union
from typing import Any, Dict, Set, Union

# necessary to ensure the ops are registered
import torch
@@ -94,12 +95,24 @@ def neg(a: _SymScalar) -> _SymScalar:
    return -a  # pyre-ignore


@bind_pattern_to_op(executorch_prims_lib, "ceil.Scalar(Scalar a) -> Scalar")
def ceil(a: _SymScalar) -> _SymScalar:
return math.ceil(a) # pyre-ignore


@bind_pattern_to_op(executorch_prims_lib, "round.Scalar(Scalar a) -> Scalar")
def builtin_round(a: _SymScalar) -> _SymScalar:
return round(a) # pyre-ignore


@bind_pattern_to_op(executorch_prims_lib, "trunc.Scalar(Scalar a) -> Scalar")
def trunc(a: _SymScalar) -> _SymScalar:
return math.trunc(a) # pyre-ignore


_PYTHON_SYM_OPS_TO_EXECUTORCH_SYM_OPS: Dict[OpOverload, OpOverload] = {
_PYTHON_SYM_OPS_TO_EXECUTORCH_SYM_OPS: Dict[Any, OpOverload] = {
builtins.round: ops.backend.executorch_prim.round.Scalar,
math.ceil: ops.backend.executorch_prim.ceil.Scalar,
math.trunc: ops.backend.executorch_prim.trunc.Scalar,
operator.sub: ops.backend.executorch_prim.sub.Scalar,
operator.mul: ops.backend.executorch_prim.mul.Scalar,
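Note: these registrations feed the callable-keyed dict above, which lets the lowering pass swap a Python scalar call for the matching ExecuTorch prim op. A minimal standalone sketch of that lookup (the strings below are placeholders for the real op overload objects, and lower_target is a hypothetical helper, not the actual pass):

# Sketch: a dict keyed on Python callables (as they appear as torch.fx
# call_function targets) maps each scalar op to its backend replacement.
import builtins
import math
import operator

_SYM_OPS = {
    builtins.round: "executorch_prim::round.Scalar",
    math.ceil: "executorch_prim::ceil.Scalar",
    math.trunc: "executorch_prim::trunc.Scalar",
    operator.sub: "executorch_prim::sub.Scalar",
    operator.mul: "executorch_prim::mul.Scalar",
}

def lower_target(target):
    # Return the registered replacement, or pass the target through unchanged.
    return _SYM_OPS.get(target, target)

assert lower_target(round) == "executorch_prim::round.Scalar"
assert lower_target(abs) is abs  # unregistered builtins pass through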
4 changes: 2 additions & 2 deletions extension/android/CMakeLists.txt
@@ -64,7 +64,7 @@ set(executorch_DIR ${CMAKE_CURRENT_BINARY_DIR}/../../lib/cmake/ExecuTorch)
find_package(executorch CONFIG REQUIRED)
target_link_options_shared_lib(executorch)

add_library(executorch_jni SHARED jni/jni_layer.cpp)
add_library(executorch_jni SHARED jni/jni_layer.cpp jni/log.cpp)

set(link_libraries)
list(
@@ -146,7 +146,7 @@ if(EXECUTORCH_JNI_CUSTOM_LIBRARY)
endif()

if(EXECUTORCH_BUILD_LLAMA_JNI)
  target_sources(executorch_jni PRIVATE jni/jni_layer_llama.cpp)
  target_sources(executorch_jni PRIVATE jni/jni_layer_llama.cpp jni/log.cpp)
  list(APPEND link_libraries llama_runner llava_runner)
  target_compile_definitions(executorch_jni PUBLIC EXECUTORCH_BUILD_LLAMA_JNI=1)
  add_subdirectory(
23 changes: 21 additions & 2 deletions extension/android/jni/BUCK
@@ -1,5 +1,6 @@
load("@fbsource//tools/build_defs/android:fb_android_cxx_library.bzl", "fb_android_cxx_library")
load("@fbsource//xplat/executorch/backends/xnnpack/third-party:third_party_libs.bzl", "third_party_dep")
load("@fbsource//xplat/executorch/build:runtime_wrapper.bzl", "runtime")
load("@fbsource//xplat/executorch/codegen:codegen.bzl", "executorch_generated_lib")

oncall("executorch")
@@ -25,7 +26,7 @@ executorch_generated_lib(

fb_android_cxx_library(
    name = "executorch_jni",
    srcs = ["jni_layer.cpp"],
    srcs = ["jni_layer.cpp", "log.cpp"],
    headers = ["jni_layer_constants.h"],
    allow_jni_merging = False,
    compiler_flags = [
@@ -36,6 +37,7 @@ fb_android_cxx_library(
    soname = "libexecutorch.$(ext)",
    visibility = ["PUBLIC"],
    deps = [
        ":log_provider_static",
        "//fbandroid/libraries/fbjni:fbjni",
        "//fbandroid/native/fb:fb",
        "//third-party/glog:glog",
@@ -49,7 +51,7 @@

fb_android_cxx_library(
    name = "executorch_jni_full",
    srcs = ["jni_layer.cpp"],
    srcs = ["jni_layer.cpp", "log.cpp"],
    headers = ["jni_layer_constants.h"],
    allow_jni_merging = False,
    compiler_flags = [
@@ -60,6 +62,7 @@
    soname = "libexecutorch.$(ext)",
    visibility = ["PUBLIC"],
    deps = [
        ":log_provider_static",
        ":generated_op_lib_optimized_static",
        "//fbandroid/libraries/fbjni:fbjni",
        "//fbandroid/native/fb:fb",
@@ -88,6 +91,7 @@ fb_android_cxx_library(
    soname = "libexecutorch.$(ext)",
    visibility = ["PUBLIC"],
    deps = [
        ":log_provider_static",
        "//fbandroid/libraries/fbjni:fbjni",
        "//fbandroid/native/fb:fb",
        "//third-party/glog:glog",
@@ -101,3 +105,18 @@ fb_android_cxx_library(
"//xplat/executorch/extension/threadpool:threadpool_static",
],
)

runtime.cxx_library(
    name = "log_provider",
    srcs = ["log.cpp"],
    exported_headers = ["log.h"],
    compiler_flags = [
        "-frtti",
        "-fexceptions",
        "-Wno-unused-variable",
    ],
    deps = [
        "//executorch/runtime/core:core",
    ],
    visibility = ["@EXECUTORCH_CLIENTS"],
)
112 changes: 21 additions & 91 deletions extension/android/jni/jni_layer.cpp
@@ -17,6 +17,7 @@

#include "jni_layer_constants.h"

#include <executorch/extension/android/jni/log.h>
#include <executorch/extension/module/module.h>
#include <executorch/extension/runner_util/inputs.h>
#include <executorch/extension/tensor/tensor.h>
@@ -36,76 +37,6 @@
using namespace executorch::extension;
using namespace torch::executor;

#ifdef __ANDROID__
#include <android/log.h>
#include <mutex>
#include <sstream>

// Number of entries to store in the in-memory log buffer.
const size_t log_buffer_length = 16;

struct log_entry {
  et_timestamp_t timestamp;
  et_pal_log_level_t level;
  std::string filename;
  std::string function;
  size_t line;
  std::string message;

  log_entry(
      et_timestamp_t timestamp,
      et_pal_log_level_t level,
      const char* filename,
      const char* function,
      size_t line,
      const char* message,
      size_t length)
      : timestamp(timestamp),
        level(level),
        filename(filename),
        function(function),
        line(line),
        message(message, length) {}
};

namespace {
std::vector<log_entry> log_buffer_;
std::mutex log_buffer_mutex_;
} // namespace

// For Android, write to logcat
void et_pal_emit_log_message(
    et_timestamp_t timestamp,
    et_pal_log_level_t level,
    const char* filename,
    const char* function,
    size_t line,
    const char* message,
    size_t length) {
  std::lock_guard<std::mutex> guard(log_buffer_mutex_);

  while (log_buffer_.size() >= log_buffer_length) {
    log_buffer_.erase(log_buffer_.begin());
  }

  log_buffer_.emplace_back(
      timestamp, level, filename, function, line, message, length);

  int android_log_level = ANDROID_LOG_UNKNOWN;
  if (level == 'D') {
    android_log_level = ANDROID_LOG_DEBUG;
  } else if (level == 'I') {
    android_log_level = ANDROID_LOG_INFO;
  } else if (level == 'E') {
    android_log_level = ANDROID_LOG_ERROR;
  } else if (level == 'F') {
    android_log_level = ANDROID_LOG_FATAL;
  }

  __android_log_print(android_log_level, "ExecuTorch", "%s", message);
}
#endif

namespace executorch::extension {
class TensorHybrid : public facebook::jni::HybridClass<TensorHybrid> {
public:
@@ -437,24 +368,26 @@ class ExecuTorchJni : public facebook::jni::HybridClass<ExecuTorchJni> {
  facebook::jni::local_ref<facebook::jni::JArrayClass<jstring>>
  readLogBuffer() {
#ifdef __ANDROID__
    std::lock_guard<std::mutex> guard(log_buffer_mutex_);

    const auto size = log_buffer_.size();
    facebook::jni::local_ref<facebook::jni::JArrayClass<jstring>> ret =
        facebook::jni::JArrayClass<jstring>::newArray(size);

    for (auto i = 0u; i < size; i++) {
      const auto& entry = log_buffer_[i];
      // Format the log entry as "[TIMESTAMP FUNCTION FILE:LINE] LEVEL MESSAGE".
      std::stringstream ss;
      ss << "[" << entry.timestamp << " " << entry.function << " "
         << entry.filename << ":" << entry.line << "] "
         << static_cast<char>(entry.level) << " " << entry.message;

      facebook::jni::local_ref<facebook::jni::JString> jstr_message =
          facebook::jni::make_jstring(ss.str().c_str());
      (*ret)[i] = jstr_message;
    }

    facebook::jni::local_ref<facebook::jni::JArrayClass<jstring>> ret;

    access_log_buffer([&](std::vector<log_entry>& buffer) {
      const auto size = buffer.size();
      ret = facebook::jni::JArrayClass<jstring>::newArray(size);
      for (auto i = 0u; i < size; i++) {
        const auto& entry = buffer[i];
        // Format the log entry as "[TIMESTAMP FUNCTION FILE:LINE] LEVEL
        // MESSAGE".
        std::stringstream ss;
        ss << "[" << entry.timestamp << " " << entry.function << " "
           << entry.filename << ":" << entry.line << "] "
           << static_cast<char>(entry.level) << " " << entry.message;

        facebook::jni::local_ref<facebook::jni::JString> jstr_message =
            facebook::jni::make_jstring(ss.str().c_str());
        (*ret)[i] = jstr_message;
      }
    });

    return ret;
#else
@@ -468,10 +401,7 @@ class ExecuTorchJni : public facebook::jni::HybridClass<ExecuTorchJni> {
makeNativeMethod("forward", ExecuTorchJni::forward),
makeNativeMethod("execute", ExecuTorchJni::execute),
makeNativeMethod("loadMethod", ExecuTorchJni::load_method),

#ifdef __ANDROID__
makeNativeMethod("readLogBuffer", ExecuTorchJni::readLogBuffer),
#endif
});
}
};
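Note: the refactor hides the buffer and its mutex behind an access_log_buffer() callback, so the JNI layer can no longer touch the buffer without holding the lock. A Python analogue of that design (a sketch, not part of the commit):

# The buffer and lock stay private to one module; callers only see the
# buffer inside the locked callback, so no caller can forget the lock.
import threading
from typing import Callable, List

_log_buffer: List[str] = []
_log_buffer_lock = threading.Lock()

def access_log_buffer(accessor: Callable[[List[str]], None]) -> None:
    with _log_buffer_lock:
        accessor(_log_buffer)

# Usage: copy entries out under the lock, then format them outside it.
snapshot: List[str] = []
access_log_buffer(lambda buf: snapshot.extend(buf))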
69 changes: 69 additions & 0 deletions extension/android/jni/log.cpp
@@ -0,0 +1,69 @@
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include "log.h"

#ifdef __ANDROID__

#include <android/log.h>
#include <functional>
#include <mutex>
#include <sstream>

using executorch::extension::log_entry;

// Number of entries to store in the in-memory log buffer.
const size_t log_buffer_length = 16;

namespace {
std::vector<log_entry> log_buffer_;
std::mutex log_buffer_mutex_;
} // namespace

// For Android, write to logcat
void et_pal_emit_log_message(
    et_timestamp_t timestamp,
    et_pal_log_level_t level,
    const char* filename,
    const char* function,
    size_t line,
    const char* message,
    size_t length) {
  std::lock_guard<std::mutex> guard(log_buffer_mutex_);

  while (log_buffer_.size() >= log_buffer_length) {
    log_buffer_.erase(log_buffer_.begin());
  }

  log_buffer_.emplace_back(
      timestamp, level, filename, function, line, message, length);

  int android_log_level = ANDROID_LOG_UNKNOWN;
  if (level == 'D') {
    android_log_level = ANDROID_LOG_DEBUG;
  } else if (level == 'I') {
    android_log_level = ANDROID_LOG_INFO;
  } else if (level == 'E') {
    android_log_level = ANDROID_LOG_ERROR;
  } else if (level == 'F') {
    android_log_level = ANDROID_LOG_FATAL;
  }

  __android_log_print(android_log_level, "ExecuTorch", "%s", message);
}

namespace executorch::extension {

void access_log_buffer(std::function<void(std::vector<log_entry>&)> accessor) {
  std::lock_guard<std::mutex> guard(log_buffer_mutex_);
  accessor(log_buffer_);
}

} // namespace executorch::extension

#endif
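Note: the while/erase loop above keeps at most log_buffer_length entries by dropping the oldest first. The same bounded-FIFO behavior, illustrated in Python with collections.deque (a sketch, not part of the commit):

# deque with maxlen evicts the oldest entry automatically once full,
# matching log.cpp's erase-from-front loop with log_buffer_length = 16.
from collections import deque

log_buffer = deque(maxlen=16)
for i in range(20):
    log_buffer.append(f"message {i}")

assert len(log_buffer) == 16
assert log_buffer[0] == "message 4"  # messages 0-3 were evicted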
(3 more changed files not shown)
