// python_interpreter.cpp
#include "torch/csrc/python_headers.h"
#include "torch/csrc/jit/interpreter.h"
#include "torch/csrc/autograd/edge.h"
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/profiler.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/jit/operator.h"
#include "torch/csrc/jit/custom_operator.h"
#include "torch/csrc/jit/graph_executor.h"
#include "torch/csrc/jit/ir.h"
#include "torch/csrc/jit/pybind_utils.h"
#include "torch/csrc/variable_tensor_functions.h"
#include <typeinfo>
#include "torch/csrc/autograd/python_engine.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/jit/pybind.h"
#include "torch/csrc/utils/auto_gil.h"
#include "torch/csrc/Exceptions.h"

namespace py = pybind11;

namespace torch { namespace jit {

namespace {

// Note: const_cast is used twice below to acquire a handle to a pyobject.
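//
// createPythonOperation turns a prim::PythonOp node into an interpreter
// Operation that calls back into the Python function held by the node.
// The node's cconv string describes how that function is called: each 'c'
// entry is a scalar argument taken from scalar_args, and each 'd' entry is
// a tensor input taken from the interpreter stack.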
Operation createPythonOperation(const Node* op_) {
  AutoGIL gil;
  const PythonOp* op = static_cast<const PythonOp*>(op_);
  const py::function func = py::reinterpret_borrow<const py::function>(
      py::handle(const_cast<PythonOp*>(op)->pyobj.get()));

  // Count the dynamic ('d') entries of the calling convention: these are the
  // tensor inputs that the lambda below consumes from the stack.
  size_t num_inputs = 0;
  for (auto arg_type : op->cconv) {
    if (arg_type == 'd')
      num_inputs++;
  }
  JIT_ASSERT(op->outputs().size() == 1);
  return [=](Stack& stack) {
    AutoGIL gil;
    py::tuple py_inputs(op->cconv.size());
    size_t i = 0;
    size_t next_scalar = 0;
    size_t next_tensor = 0;
    // Build the Python argument tuple by walking the calling convention,
    // interleaving captured scalar arguments ('c') with tensor inputs taken
    // from the interpreter stack ('d').
    for (auto arg_type : op->cconv) {
      if (arg_type == 'c') {
        py_inputs[i] = py::reinterpret_borrow<const py::object>(
            const_cast<PythonOp*>(op)->scalar_args[next_scalar++].get());
      } else if (arg_type == 'd') {
        py_inputs[i] =
            toPyObject(std::move(peek(stack, next_tensor, num_inputs)));
        next_tensor++;
      }
      i++;
    }
    drop(stack, num_inputs);
    try {
      // Call the Python function and push its single result back onto the
      // stack as an IValue of the node's declared output type.
      py::object py_output(func(*py_inputs));
      stack.push_back(returnToIValue(op->output()->type(), py_output));
    } catch (py::error_already_set& e) {
      throw std::runtime_error(e.what());
    }
    return 0;
  };
}
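
// Register the factory so the interpreter dispatches prim::PythonOp nodes
// to the operation created above.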
RegisterOperators reg({
    Operator(prim::PythonOp, createPythonOperation)
});

}}} // torch::jit::anon