Commit 988dece
Fix duplicate log output
qiuwenbogdut committed Sep 19, 2023
1 parent 7b26f4a commit 988dece
Showing 6 changed files with 6 additions and 19 deletions.
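
Why the output duplicated: each grad node builds one Input/Output string and then logged it twice, once at VLOG(4) and again at VLOG(6). VLOG verbosity levels are cumulative in glog (which Paddle uses for logging): running with GLOG_v=6 also emits every VLOG(4) message, so the same line printed twice. The fix keeps a single VLOG(4) call per node and leaves the separate "gradnode_ptr" VLOG(6) line intact. Below is a minimal standalone sketch of the behavior, assuming plain glog; the FLAGS_v setup and message text are illustrative, not Paddle's actual configuration.

// Minimal sketch, assuming glog-style VLOG semantics: with verbosity 6,
// every VLOG(n) with n <= 6 fires, so a message logged at both level 4
// and level 6 prints twice.
#include <glog/logging.h>

int main(int argc, char* argv[]) {
  FLAGS_v = 6;  // same effect as running with GLOG_v=6
  google::InitGoogleLogging(argv[0]);

  // Before the fix: identical message at two levels -> printed twice.
  VLOG(4) << "{ Input: [x], Output: [y] }";
  VLOG(6) << "{ Input: [x], Output: [y] }";  // duplicate when -v >= 6

  // After the fix: a single VLOG(4) -> printed once at any -v >= 4.
  VLOG(4) << "{ Input: [x], Output: [y] }";
  return 0;
}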
paddle/fluid/eager/accumulation/accumulation_node.cc (1 addition & 3 deletions)

@@ -187,10 +187,8 @@ GradNodeAccumulation::operator()(
     std::string output_x_grad_str = paddle::string::Sprintf(
         TENSOR_X_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(grad_out));
     output_str += output_x_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
   return {{grad_out}};
@@ -92,10 +92,8 @@ AddNGradNodeFinal::operator()(
         TENSOR_OUTPUT_TEMPLATE, egr::EagerUtils::TensorStr(returns[0][0]));
     output_str += output_returns_str;

-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
@@ -192,10 +192,8 @@ Conv2dGradNodeFinal::operator()(
         TENSOR_GRAD_FILTER_TEMPLATE, egr::EagerUtils::TensorStr(grad_filter));
     output_str += output_grad_filter_str;

-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }

@@ -370,10 +368,8 @@ Conv2dDoubleGradNodeFinal::operator()(
         egr::EagerUtils::TensorStr(grad_out_grad));
     output_str += output_grad_out_grad_str;

-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
@@ -594,10 +594,8 @@ MultiplyGradNode::operator()(
     std::string output_y_grad_str = paddle::string::Sprintf(
         TENSOR_Y_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(y_grad));
     output_str += output_y_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
@@ -457,10 +457,8 @@ SyncBatchNormGradNode::operator()(
     std::string output_bias_grad_str = paddle::string::Sprintf(
         TENSOR_BIAS_GRAD_TEMPLATE, egr::EagerUtils::TensorStr(bias_grad));
     output_str += output_bias_grad_str;
-    VLOG(4) << paddle::string::Sprintf(
-        INPUT_PRINT_TEMPLATE, input_str, output_str);
     VLOG(6) << "gradnode_ptr = " << this;
-    VLOG(6) << paddle::string::Sprintf(
+    VLOG(4) << paddle::string::Sprintf(
         INPUT_PRINT_TEMPLATE, input_str, output_str);
   }
@@ -293,7 +293,6 @@ class {} : public egr::GradNodeBase {{
   const char* INPUT_PRINT_TEMPLATE = \"{{ Input: [%s], \\n Output: [%s] }} \";
 {}
   VLOG(4) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
-  VLOG(6) << paddle::string::Sprintf(INPUT_PRINT_TEMPLATE, input_str, output_str);
 }}
 """
