
Commit c2f4e42
Cherrypick: Cleanup: move to per-module loggers instead of the global logging object (#2539)
justinxzhao authored Sep 20, 2022
1 parent 26d707b commit c2f4e42
Showing 25 changed files with 133 additions and 100 deletions.
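The refactor applies the standard Python logging idiom: each module creates its own named logger with logging.getLogger(__name__) instead of calling logging.info() and friends on the root logging object. Because __name__ is the dotted module path (e.g. ludwig.automl.automl), records carry their origin, and handlers and levels can be tuned per package. A minimal sketch of the pattern as it recurs in each touched module (the function below is illustrative, not taken from the diff):

import logging

# Module-level logger named after the dotted module path, so records
# emitted here report their origin instead of "root".
logger = logging.getLogger(__name__)


def do_work() -> None:
    logger.info("doing work")  # routed through this module's logger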

README.md (2 changes: 1 addition & 1 deletion)

@@ -409,4 +409,4 @@ know, please consider [joining the Ludwig Slack](https://join.slack.com/t/ludwig
 - [Slack](https://join.slack.com/t/ludwig-ai/shared_invite/zt-mrxo87w6-DlX5~73T2B4v_g6jj0pJcQ)
 - [Twitter](https://twitter.com/ludwig_ai)
 - [Medium](https://medium.com/ludwig-ai)
-- [GitHub Issues](https://github.com/ludwig-ai/ludwig/issues)
\ No newline at end of file
+- [GitHub Issues](https://github.com/ludwig-ai/ludwig/issues)

ludwig/automl/auto_tune_config.py (4 changes: 3 additions & 1 deletion)

@@ -35,6 +35,8 @@
 from ludwig.utils.defaults import merge_with_defaults
 from ludwig.utils.torch_utils import initialize_pytorch
 
+logger = logging.getLogger(__name__)
+
 # maps variable search space that can be modified to minimum permissible value for the range
 RANKED_MODIFIABLE_PARAM_LIST = {
     "tabnet": OrderedDict(
@@ -228,7 +230,7 @@ def memory_tune_config(config, dataset, model_category, row_count, backend):
         if reduce_text_feature_max_length(config, training_set_metadata):
             reduce_text_feature_max_length(temp_config, training_set_metadata)
         mem_use = compute_memory_usage(temp_config, training_set_metadata, model_category)
-        logging.info(f"Checking model estimated mem use {mem_use} against memory size {max_memory}")
+        logger.info(f"Checking model estimated mem use {mem_use} against memory size {max_memory}")
         if mem_use <= max_memory:
             fits_in_memory = True
             break

ludwig/automl/automl.py (4 changes: 3 additions & 1 deletion)

@@ -58,6 +58,8 @@
     raise ImportError(" ray is not installed. In order to use auto_train please run pip install ludwig[ray]")
 
 
+logger = logging.getLogger(__name__)
+
 OUTPUT_DIR = "."
 
 
@@ -77,7 +79,7 @@ def best_trial_id(self) -> str:
     def best_model(self) -> Optional[LudwigModel]:
         checkpoint = self._experiment_analysis.best_checkpoint
         if checkpoint is None:
-            logging.warning("No best model found")
+            logger.warning("No best model found")
             return None
 
         if not _ray_113:

ludwig/backend/datasource.py (2 changes: 1 addition & 1 deletion)

@@ -110,7 +110,7 @@ def _read_file(
         try:
             data = get_bytes_obj_from_http_path(path)
         except urllib3.exceptions.HTTPError as e:
-            logging.warning(e)
+            logger.warning(e)
             data = None
         else:
             super_result = super()._read_file(f, path, **reader_args)[0]

ludwig/benchmarking/benchmark.py (3 changes: 2 additions & 1 deletion)

@@ -11,6 +11,7 @@
 from ludwig.utils.data_utils import load_yaml
 
 os.environ["TOKENIZERS_PARALLELISM"] = "false"
+logger = logging.getLogger(__name__)
 
 
 def setup_experiment(experiment: Dict[str, str]) -> Dict[Any, Any]:
@@ -32,7 +33,7 @@ def benchmark_one_local(experiment: Dict[str, str], export_artifacts_dict: Dict[
         experiment: dictionary containing the dataset name, config path, and experiment name.
         export_artifacts_dict: dictionary containing an export boolean flag and a path to export to.
     """
-    logging.info(f"\nRunning experiment *{experiment['experiment_name']}* on dataset *{experiment['dataset_name']}*")
+    logger.info(f"\nRunning experiment *{experiment['experiment_name']}* on dataset *{experiment['dataset_name']}*")
 
     # configuring backend and paths
     model_config = setup_experiment(experiment)

ludwig/benchmarking/summary_dataclasses.py (6 changes: 4 additions & 2 deletions)

@@ -11,6 +11,8 @@
 from ludwig.modules.metric_registry import get_metric_classes, metric_feature_registry  # noqa: F401
 from ludwig.utils.data_utils import load_json
 
+logger = logging.getLogger(__name__)
+
 
 @dataclass
 class MetricDiff:
@@ -197,7 +199,7 @@ def export_metrics_diff_to_csv(metrics_diff: MetricsDiff, path: str):
"Diff Percentage": diff_percentage,
}
)
logging.info(f"Exported a CSV report to {path}")
logger.info(f"Exported a CSV report to {path}")


def build_metrics_summary(experiment_local_directory: str) -> MetricsSummary:
@@ -360,7 +362,7 @@ def export_resource_usage_diff_to_csv(resource_usage_diff: ResourceUsageDiff, pa
"Diff Percentage": diff_percentage,
}
)
logging.info(f"Exported a CSV report to {path}")
logger.info(f"Exported a CSV report to {path}")


def average_runs(path_to_runs_dir: str) -> Dict[str, Union[int, float]]:

ludwig/benchmarking/utils.py (4 changes: 3 additions & 1 deletion)

@@ -18,6 +18,8 @@
 from ludwig.utils.defaults import default_random_seed
 from ludwig.utils.fs_utils import get_fs_and_path
 
+logger = logging.getLogger(__name__)
+
 
 def load_from_module(
     dataset_module: Union[BaseDataset, ModuleType], output_feature: Dict[str, str], subsample_frac: float = 1
@@ -61,7 +63,7 @@ def export_artifacts(experiment: Dict[str, str], experiment_output_directory: st
os.path.join("configs", experiment["config_path"]),
os.path.join(export_full_path, CONFIG_YAML),
)
logging.info(f"Uploaded experiment artifact to\n\t{export_full_path}")
logger.info(f"Uploaded experiment artifact to\n\t{export_full_path}")
except Exception:
logging.exception(
f"Failed to upload experiment artifacts for experiment *{experiment['experiment_name']}* on "
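
With per-module loggers throughout the library, downstream code can adjust verbosity for one subsystem without affecting the rest. A hypothetical consumer-side snippet (the logger names mirror the module paths touched in this commit):

import logging

# Root configuration supplies handlers and a default level for all loggers.
logging.basicConfig(level=logging.INFO)

# Per-package overrides, independent of the rest of the library.
logging.getLogger("ludwig.benchmarking").setLevel(logging.WARNING)
logging.getLogger("ludwig.automl").setLevel(logging.DEBUG)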