Revert "Load PyTorch backend as a persistent backend (#224)"
This reverts commit 0811529.
Tabrizian committed Aug 31, 2023
1 parent fc2ada2 commit e09631f
Showing 3 changed files with 0 additions and 57 deletions.
39 changes: 0 additions & 39 deletions src/backend_manager.cc
@@ -381,43 +381,4 @@ TritonBackendManager::BackendState(
return Status::Success;
}

Status
TritonBackendManager::PreloadBackend(
const std::string& backend_name,
const triton::common::BackendCmdlineConfigMap& config_map)
{
std::string backends_dir;
std::string specialized_backend_name;
std::string backend_libname;
RETURN_IF_ERROR(
BackendConfigurationGlobalBackendsDirectory(config_map, &backends_dir));
RETURN_IF_ERROR(BackendConfigurationSpecializeBackendName(
config_map, backend_name, &specialized_backend_name));
RETURN_IF_ERROR(BackendConfigurationBackendLibraryName(
specialized_backend_name, &backend_libname));

const auto backend_dir = JoinPath({backends_dir, specialized_backend_name});
const auto backend_libpath = JoinPath({backend_dir, backend_libname});
bool exists = false;
RETURN_IF_ERROR(FileExists(backend_libpath, &exists));
if (exists) {
triton::common::BackendCmdlineConfig empty_backend_cmdline_config;
const triton::common::BackendCmdlineConfig* config;
const auto& itr = config_map.find(backend_name);
if (itr == config_map.end()) {
config = &empty_backend_cmdline_config;
} else {
config = &itr->second;
}

// The backend manager always holds a reference to the backend object,
// so it is safe for this local shared_ptr to go out of scope.
std::shared_ptr<TritonBackend> backend;
RETURN_IF_ERROR(CreateBackend(
backend_name, backend_dir, backend_libpath, *config, &backend));
}

return Status::Success;
}

}} // namespace triton::core
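
For reference, the removed PreloadBackend resolves the backend's shared-library path, falls back to an empty command-line config when the backend has no entry in the config map, and creates the backend only if the library exists on disk. Below is a minimal, self-contained sketch of the path resolution; the backends directory value is an assumption for illustration, and "libtriton_<name>.so" reflects Triton's usual backend library naming on Linux.

#include <iostream>
#include <string>

// Sketch of the path resolution PreloadBackend performs. The backends
// directory is an assumed default; "libtriton_<name>.so" is the usual
// Triton backend library naming convention on Linux.
int main() {
  const std::string backends_dir = "/opt/tritonserver/backends";  // assumed
  const std::string backend_name = "pytorch";

  const std::string backend_dir = backends_dir + "/" + backend_name;
  const std::string backend_libpath =
      backend_dir + "/libtriton_" + backend_name + ".so";

  // Prints: /opt/tritonserver/backends/pytorch/libtriton_pytorch.so
  std::cout << backend_libpath << std::endl;
  return 0;
}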
6 changes: 0 additions & 6 deletions src/backend_manager.h
@@ -31,7 +31,6 @@
#include <string>
#include <unordered_map>

#include "backend_config.h"
#include "constants.h"
#include "filesystem/api.h"
#include "server_message.h"
@@ -170,10 +169,6 @@ class TritonBackendManager {
const triton::common::BackendCmdlineConfig& backend_cmdline_config,
std::shared_ptr<TritonBackend>* backend);

Status PreloadBackend(
const std::string& backend_name,
const triton::common::BackendCmdlineConfigMap& config_map);

Status BackendState(
std::unique_ptr<
std::unordered_map<std::string, std::vector<std::string>>>*
@@ -182,7 +177,6 @@
private:
DISALLOW_COPY_AND_ASSIGN(TritonBackendManager);
TritonBackendManager() = default;

std::unordered_map<std::string, std::shared_ptr<TritonBackend>> backend_map_;
};

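The header change above drops the PreloadBackend declaration; the backend_map_ member that remains is what makes the "manager always holds a reference" comment in backend_manager.cc true. A minimal sketch of that ownership pattern follows, using illustrative names rather than the actual Triton types.

#include <iostream>
#include <map>
#include <memory>
#include <string>

// Illustrative stand-in for TritonBackend; not the actual Triton type.
struct Backend {
  explicit Backend(std::string n) : name(std::move(n)) {}
  std::string name;
};

// Stand-in for the manager's backend_map_: shared ownership keyed by name.
std::map<std::string, std::shared_ptr<Backend>> backend_map;

void Preload(const std::string& name) {
  auto backend = std::make_shared<Backend>(name);
  backend_map.emplace(name, backend);
  // The local 'backend' goes out of scope here, but the map's reference
  // keeps the object alive -- the same reasoning as the comment above.
}

int main() {
  Preload("pytorch");
  std::cout << backend_map.at("pytorch")->name << std::endl;  // prints "pytorch"
  return 0;
}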
12 changes: 0 additions & 12 deletions src/server.cc
@@ -151,17 +151,6 @@ InferenceServer::Init()
return status;
}

// TODO: Remove once the PyTorch bug is resolved. Currently, PyTorch has
// issues when models from other backends (TF specifically) are loaded at
// the same time, causing a segfault. Once those issues are resolved, this
// preload can be removed.
status =
backend_manager_->PreloadBackend("pytorch", backend_cmdline_config_map_);
if (!status.IsOk()) {
ready_state_ = ServerReadyState::SERVER_FAILED_TO_INITIALIZE;
return status;
}

// CacheManager
status = TritonCacheManager::Create(&cache_manager_, cache_dir_);
if (!status.IsOk()) {
@@ -249,7 +238,6 @@ InferenceServer::Init()
LOG_WARNING << status.Message();
}


// Create the model manager for the repository. Unless model control
// is disabled, all models are eagerly loaded when the manager is created.
bool polling_enabled = (model_control_mode_ == ModelControlMode::MODE_POLL);
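Both server.cc hunks follow the same initialization pattern: each step returns a Status, and any failure puts the server into SERVER_FAILED_TO_INITIALIZE and aborts Init(). A simplified, self-contained sketch of that pattern is below; Status and ServerReadyState here are stand-ins, not Triton's actual classes.

#include <iostream>
#include <string>

// Simplified stand-ins for Triton's Status and ServerReadyState.
struct Status {
  bool ok = true;
  std::string msg;
  bool IsOk() const { return ok; }
};

enum class ServerReadyState { INITIALIZING, READY, FAILED_TO_INITIALIZE };

ServerReadyState ready_state = ServerReadyState::INITIALIZING;

// Each init step reports success or failure through a Status.
Status CreateBackendManager() { return {}; }
Status CreateCacheManager() { return {false, "cache init failed"}; }  // assumed failure

Status Init() {
  Status status = CreateBackendManager();
  if (!status.IsOk()) {
    ready_state = ServerReadyState::FAILED_TO_INITIALIZE;
    return status;
  }
  status = CreateCacheManager();
  if (!status.IsOk()) {
    ready_state = ServerReadyState::FAILED_TO_INITIALIZE;  // abort on first failure
    return status;
  }
  ready_state = ServerReadyState::READY;
  return {};
}

int main() {
  Status s = Init();
  std::cout << (s.IsOk() ? "server ready" : s.msg) << std::endl;
  return 0;
}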
