Add retrieval of backward-node address information and print edge information, without changing existing log levels
qiuwenbogdut committed Sep 15, 2023
2 parents fd0c655 + 0fd9e8f commit e6edf3d
Showing 10 changed files with 135 additions and 4 deletions.
7 changes: 6 additions & 1 deletion paddle/fluid/eager/accumulation/accumulation_node.cc
@@ -132,21 +132,26 @@ GradNodeAccumulation::operator()(
if (ReduceHooksRegistered()) {
ApplyReduceHooks();
}

VLOG(3) << "Finish AD API Grad: GradNodeAccumulation";
if (VLOG_IS_ON(4)) {
const char* INPUT_PRINT_TEMPLATE = "{ Input: [%s], Output: [%s] } ";

std::string input_str = "";
std::string output_str = "";

const char* TENSOR_OUT_GRAD_TEMPLATE = "(grads[0][0], [%s]), ";
std::string input_out_grad_str = paddle::string::Sprintf(
TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grads[0][0]));
input_str += input_out_grad_str;
const char* TENSOR_X_GRAD_TEMPLATE = "(grad_out, [%s]), ";
std::string output_x_grad_str = paddle::string::Sprintf(
TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
output_str += output_x_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}
return {{grad_out}};
}
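The VLOG_IS_ON(4) guard above keeps the TensorStr formatting off the hot path: the debug strings are built only when verbose logging is active, and the node address is echoed at the higher VLOG(6) level so existing output is unchanged. A minimal standalone sketch of the same gate-then-format pattern, with a hypothetical DemoNode and BuildDebugString standing in for the real grad node and egr::EagerUtils::TensorStr:

#include <glog/logging.h>
#include <string>

struct DemoNode {
  // Hypothetical stand-in for the egr::EagerUtils::TensorStr(...) formatting.
  std::string BuildDebugString() const {
    return "{ Input: [...], Output: [...] }";
  }

  void LogState() const {
    // Build the (potentially expensive) debug string only when VLOG(4) is on;
    // the node address goes to VLOG(6) so default verbosity is unaffected.
    if (VLOG_IS_ON(4)) {
      std::string dbg = BuildDebugString();
      VLOG(4) << dbg;
      VLOG(6) << "gradnode_ptr = " << this << ", " << dbg;
    }
  }
};

int main(int argc, char* argv[]) {
  google::InitGoogleLogging(argv[0]);
  DemoNode node;
  node.LogState();  // run with GLOG_v=6 to see both lines
  return 0;
}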
24 changes: 24 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/add_n_node.cc
@@ -72,6 +72,30 @@ AddNGradNodeFinal::operator()(
egr::CheckTensorHasNanOrInf("add_n_grad", returns);
}

if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( x , [%s]), ";
std::string input_x_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(x));
input_str += input_x_str;

const char *TENSOR_OUT_GRAD_TEMPLATE = " \n( out_grad , [%s]), ";
std::string input_out_grad_str =
paddle::string::Sprintf(TENSOR_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(out_grad));
input_str += input_out_grad_str;

const char *TENSOR_OUTPUT_TEMPLATE = " \n ( returns , [%s]), ";
std::string output_returns_str = paddle::string::Sprintf(
TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0]));
output_str += output_returns_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}

if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
}
86 changes: 86 additions & 0 deletions paddle/fluid/eager/api/manual/eager_manual/nodes/conv2d_nodes.cc
@@ -162,6 +162,41 @@ Conv2dGradNodeFinal::operator()(
// Set TensorWrappers for Forward Outputs if needed
}

if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_TEMPLATE = " \n ( grad_input , [%s]), ";
std::string output_grad_input_str = paddle::string::Sprintf(
TENSOR_GRAD_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_input));
output_str += output_grad_input_str;

const char *TENSOR_GRAD_FILTER_TEMPLATE = " \n ( grad_filter , [%s]), ";
std::string output_grad_filter_str = paddle::string::Sprintf(
TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter));
output_str += output_grad_filter_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -283,6 +318,57 @@ Conv2dDoubleGradNodeFinal::operator()(

// Create Grad Node


if (VLOG_IS_ON(4)) {
const char *INPUT_PRINT_TEMPLATE = "{ Input: [%s], \n Output: [%s] } ";
std::string input_str = "";
std::string output_str = "";

const char *TENSOR_INPUT_TEMPLATE = " \n( input , [%s]), ";
std::string input_input_str =
paddle::string::Sprintf(TENSOR_INPUT_TEMPLATE, egr::EagerUtils::TensorStr(input));
input_str += input_input_str;

const char *TENSOR_FILTER_TEMPLATE = " \n( filter , [%s]), ";
std::string input_filter_str =
paddle::string::Sprintf(TENSOR_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(filter));
input_str += input_filter_str;

const char *TENSOR_GRAD_OUT_TEMPLATE = " \n( grad_out , [%s]), ";
std::string input_grad_out_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
input_str += input_grad_out_str;

const char *TENSOR_GRAD_INPUT_GRAD_TEMPLATE = " \n( grad_input_grad , [%s]), ";
std::string input_grad_input_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_input_grad));
input_str += input_grad_input_grad_str;

const char *TENSOR_GRAD_FILTER_GRAD_TEMPLATE = " \n( grad_filter_grad , [%s]), ";
std::string input_grad_filter_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter_grad));
input_str += input_grad_filter_grad_str;

const char *TENSOR_INPUT_GRAD_TEMPLATE = " \n( input_grad , [%s]), ";
std::string output_input_grad_str =
paddle::string::Sprintf(TENSOR_INPUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(input_grad));
output_str += output_input_grad_str;

const char *TENSOR_FILTER_GRAD_TEMPLATE = " \n( filter_grad , [%s]), ";
std::string output_filter_grad_str =
paddle::string::Sprintf(TENSOR_FILTER_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(filter_grad));
output_str += output_filter_grad_str;

const char *TENSOR_GRAD_OUT_GRAD_TEMPLATE = " \n( grad_out_grad , [%s]) ";
std::string output_grad_out_grad_str =
paddle::string::Sprintf(TENSOR_GRAD_OUT_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out_grad));
output_str += output_grad_out_grad_str;

VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}


// Return
if (NeedComplexToRealConversion()) HandleComplexGradToRealGrad(&returns);
return returns;
@@ -596,6 +596,8 @@ MultiplyGradNode::operator()(
output_str += output_y_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
@@ -459,6 +459,8 @@ SyncBatchNormGradNode::operator()(
output_str += output_bias_grad_str;
VLOG(4) << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << "gradnode_ptr = " << this << ", " << paddle::string::Sprintf(
INPUT_PRINT_TEMPLATE, input_str, output_str);
}

// Return
4 changes: 4 additions & 0 deletions paddle/fluid/eager/auto_code_generator/generator/eager_gen.py
@@ -176,6 +176,7 @@ class {} : public egr::GradNodeBase {{
GRAD_FUNCTION_TEMPLATE = """
paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize> {}::operator()(paddle::small_vector<std::vector<paddle::Tensor>, egr::kSlotSmallVectorSize>& grads, bool create_graph, bool is_new_grad) {{
VLOG(3) << \"Running AD API GRAD: \" << \"{}\";
// Fill Zero For GradIn Tensors
{}
// Apply Gradient Hooks
@@ -204,7 +205,9 @@ class {} : public egr::GradNodeBase {{
// Create Grad Node
{}
VLOG(4) << \"Finish AD API GRAD: {}";
VLOG(6) << "gradnode_ptr = " << this;
// LOG IF DEBUG
{}
// Return
{}
@@ -263,6 +266,7 @@ class {} : public egr::GradNodeBase {{
const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \";
{}
VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
}}
"""

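For readers unfamiliar with the generator, these templates are filled with Python's str.format: every bare {} receives an op-specific fragment, and braces that must survive into the emitted C++ are doubled as {{ }}. A toy sketch of that mechanism (hypothetical template and names, not the real GRAD_FUNCTION_TEMPLATE):

# Toy sketch of eager_gen.py's template mechanism: {} slots are filled by
# .format(), while doubled braces become literal braces in the emitted C++.
TOY_TEMPLATE = """
void {}::Run() {{
  VLOG(6) << "gradnode_ptr = " << this;
  {}
}}
"""

print(TOY_TEMPLATE.format("MyGradNode", "// op-specific body"))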
4 changes: 4 additions & 0 deletions paddle/fluid/eager/grad_node_info.cc
@@ -575,4 +575,8 @@ std::vector<std::shared_ptr<GradNodeBase>> GradNodeBase::NextFunctions() {
return next_nodes;
}

uintptr_t GradNodeBase::GetThisPtr() const {
return reinterpret_cast<uintptr_t>(this);
}

} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/grad_node_info.h
@@ -253,6 +253,8 @@ class GradNodeBase {

std::vector<std::shared_ptr<egr::GradNodeBase>> NextFunctions();

uintptr_t GetThisPtr() const;

/**
* Apply GradientHook
* **/
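GetThisPtr returns the address as uintptr_t rather than a raw pointer, so the pybind layer further below can hand Python a plain integer that is directly comparable with the gradnode_ptr values in the VLOG(6) output. A standalone sketch of the cast, using a hypothetical Node type:

#include <cstdint>
#include <iostream>

struct Node {
  // Mirrors GradNodeBase::GetThisPtr(): expose the object address as an
  // integer so language bindings can return it without a pointer type.
  uintptr_t GetThisPtr() const { return reinterpret_cast<uintptr_t>(this); }
};

int main() {
  Node n;
  std::cout << std::hex << n.GetThisPtr() << std::endl;
  return 0;
}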
6 changes: 3 additions & 3 deletions paddle/fluid/eager/utils.h
@@ -255,7 +255,7 @@ class EagerUtils {
}
if (VLOG_IS_ON(11)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d, "
"TensorInfo: [ %s ], Value:[ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -306,7 +306,7 @@ class EagerUtils {
}
} else if (VLOG_IS_ON(6)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d, Ptr: %d "
"{Name: %s, Initialized: %d, Ptr: %d,"
"TensorInfo: [ %s ], ADInfo:[ %s ]}";
auto* ad_meta = nullable_autograd_meta(t);
if (ad_meta && (ad_meta->WeakGrad().lock().get())) {
@@ -333,7 +333,7 @@
}
} else if (VLOG_IS_ON(5)) {
const char* TENSOR_PRINT_TEMPLATE =
"{Name: %s, Initialized: %d , Ptr: %d "
"{Name: %s, Initialized: %d , Ptr: %d, "
"TensorInfo: [ %s ]}";
return paddle::string::Sprintf(TENSOR_PRINT_TEMPLATE,
tensor_name_str,
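None of the templates above change default behavior; each fires only at its VLOG level. In Paddle, glog verbosity is typically selected with the GLOG_v environment variable, so a hedged sketch of enabling the new output without any code change:

import os

# Set glog verbosity before paddle is imported so the VLOG(4) summaries and
# the new VLOG(6) "gradnode_ptr = ..." lines are emitted.
os.environ["GLOG_v"] = "6"

import paddle  # noqa: E402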
2 changes: 2 additions & 0 deletions paddle/fluid/pybind/pybind.cc
@@ -789,6 +789,8 @@ PYBIND11_MODULE(libpaddle, m) {
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->NextFunctions();
})

.def("node_this_ptr", &egr::GradNodeBase::GetThisPtr)
.def("input_meta",
[](const std::shared_ptr<egr::GradNodeBase> &self) {
return self->InputMeta();
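With node_this_ptr exposed alongside next_functions, the backward graph can be walked from Python and each node's address matched against the gradnode_ptr lines in the C++ logs. A hedged sketch, assuming Tensor.grad_fn returns the root GradNodeBase binding as in Paddle's eager-mode debugging API:

import paddle

x = paddle.randn([2, 3])
x.stop_gradient = False
y = (x * x).sum()

# Breadth-first walk over the grad graph; node_this_ptr() should match the
# "gradnode_ptr = ..." addresses printed at VLOG(6) during backward.
queue = [y.grad_fn]
seen = set()
while queue:
    node = queue.pop(0)
    if node is None or node.node_this_ptr() in seen:
        continue
    seen.add(node.node_this_ptr())
    print(type(node).__name__, hex(node.node_this_ptr()))
    queue.extend(node.next_functions())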
