Skip to content

Commit

Permalink
Fix some import-related style guide violations in ES (#371)
Browse files Browse the repository at this point in the history
This patch fixes some import-related style guide violations,
particularly the importing of multiple modules/packages on a single
line. Some internal tooling is not able to effectively analyze these
patterns (they show up rarely precisely because the style guide bans
them), which is relatively annoying.

We also should not be importing individual classes/functions, so fix
those too where they coincide with the previous point.
  • Loading branch information
boomanaiden154 authored Sep 17, 2024
1 parent 00f9e4a commit 9c81ac6
Show file tree
Hide file tree
Showing 4 changed files with 52 additions and 36 deletions.
8 changes: 6 additions & 2 deletions compiler_opt/es/blackbox_learner_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -27,9 +27,13 @@

from compiler_opt.distributed import worker
from compiler_opt.distributed.local import local_worker_manager
from compiler_opt.es import blackbox_learner, policy_utils
from compiler_opt.es import blackbox_learner
from compiler_opt.es import policy_utils
from compiler_opt.es import blackbox_optimizers
from compiler_opt.rl import corpus, inlining, policy_saver, registry
from compiler_opt.rl import corpus
from compiler_opt.rl import inlining
from compiler_opt.rl import policy_saver
from compiler_opt.rl import registry
from compiler_opt.rl.inlining import config as inlining_config
from compiler_opt.es import blackbox_evaluator

Expand Down
73 changes: 41 additions & 32 deletions compiler_opt/es/blackbox_optimizers_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,6 @@
import numpy as np

from compiler_opt.es import blackbox_optimizers
from compiler_opt.es.blackbox_optimizers import EstimatorType, UpdateMethod, RegressionType
from compiler_opt.es import gradient_ascent_optimization_algorithms

perturbation_array = np.array([[0, 1], [2, -1], [4, 2],
Expand All @@ -65,10 +64,12 @@
class BlackboxOptimizationAlgorithmsTest(parameterized.TestCase):

@parameterized.parameters(
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 3,
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 3,
np.array([[4, 2], [2, 6], [-1, 5], [-2, -2], [8, -6], [1, -5]
]), np.array([10, -8, 4, -10, 8, -4])),
(perturbation_array, function_value_array, EstimatorType.FORWARD_FD, 5,
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 5,
np.array([[4, 2], [8, -6], [-1, 5], [0, -3], [2, -1]
]), np.array([10, 8, 4, 2, 1])))
def test_filtering(self, perturbations, function_values, est_type,
Expand All @@ -79,21 +80,23 @@ def test_filtering(self, perturbations, function_values, est_type,
np.testing.assert_array_equal(expected_fs, top_fs)

@parameterized.parameters(
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 3,
np.array([100, -16])), (perturbation_array, function_value_array,
EstimatorType.FORWARD_FD, 5, np.array([76, -9])),
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 0,
np.array([102, -34])),
(perturbation_array, function_value_array, EstimatorType.FORWARD_FD, 0,
np.array([74, -34])))
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 3, np.array([100, -16])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 5, np.array([76, -9])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 0, np.array([102, -34])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 0, np.array([74, -34])))
def test_monte_carlo_gradient(self, perturbations, function_values, est_type,
num_top_directions, expected_gradient):
precision_parameter = 0.1
step_size = 0.01
current_value = 2
blackbox_object = blackbox_optimizers.MonteCarloBlackboxOptimizer(
precision_parameter, est_type, False, UpdateMethod.NO_METHOD, None,
step_size, num_top_directions)
precision_parameter, est_type, False,
blackbox_optimizers.UpdateMethod.NO_METHOD, None, step_size,
num_top_directions)
current_input = np.zeros(2)
step = blackbox_object.run_step(perturbations, function_values,
current_input, current_value)
Expand All @@ -106,13 +109,14 @@ def test_monte_carlo_gradient(self, perturbations, function_values, est_type,
np.testing.assert_array_almost_equal(expected_gradient, gradient)

@parameterized.parameters(
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 3,
np.array([100, -16])), (perturbation_array, function_value_array,
EstimatorType.FORWARD_FD, 5, np.array([76, -9])),
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 0,
np.array([102, -34])),
(perturbation_array, function_value_array, EstimatorType.FORWARD_FD, 0,
np.array([74, -34])))
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 3, np.array([100, -16])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 5, np.array([76, -9])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 0, np.array([102, -34])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 0, np.array([74, -34])))
def test_monte_carlo_gradient_with_gradient_ascent_optimizer(
self, perturbations, function_values, est_type, num_top_directions,
expected_gradient):
Expand All @@ -124,8 +128,9 @@ def test_monte_carlo_gradient_with_gradient_ascent_optimizer(
step_size, 0.0))
blackbox_object = (
blackbox_optimizers.MonteCarloBlackboxOptimizer(
precision_parameter, est_type, False, UpdateMethod.NO_METHOD, None,
None, num_top_directions, gradient_ascent_optimizer))
precision_parameter, est_type, False,
blackbox_optimizers.UpdateMethod.NO_METHOD, None, None,
num_top_directions, gradient_ascent_optimizer))
current_input = np.zeros(2)
step = blackbox_object.run_step(perturbations, function_values,
current_input, current_value)
Expand All @@ -137,15 +142,18 @@ def test_monte_carlo_gradient_with_gradient_ascent_optimizer(

np.testing.assert_array_almost_equal(expected_gradient, gradient)

@parameterized.parameters(
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 3,
np.array([0.00483, -0.007534])),
(perturbation_array, function_value_array, EstimatorType.FORWARD_FD, 5,
np.array([0.012585, 0.000748])),
(perturbation_array, function_value_array, EstimatorType.ANTITHETIC, 0,
np.array([0.019319, -0.030134])),
(perturbation_array, function_value_array, EstimatorType.FORWARD_FD, 0,
np.array([0.030203, 0.001796])))
@parameterized.parameters((perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 3,
np.array([0.00483, -0.007534])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 5,
np.array([0.012585, 0.000748])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.ANTITHETIC, 0,
np.array([0.019319, -0.030134])),
(perturbation_array, function_value_array,
blackbox_optimizers.EstimatorType.FORWARD_FD, 0,
np.array([0.030203, 0.001796])))
def test_sklearn_gradient(self, perturbations, function_values, est_type,
num_top_directions, expected_gradient):
precision_parameter = 0.1
Expand All @@ -156,8 +164,9 @@ def test_sklearn_gradient(self, perturbations, function_values, est_type,
gradient_ascent_optimization_algorithms.MomentumOptimizer(
step_size, 0.0))
blackbox_object = blackbox_optimizers.SklearnRegressionBlackboxOptimizer(
RegressionType.RIDGE, regularizer, est_type, True,
UpdateMethod.NO_METHOD, [], None, gradient_ascent_optimizer)
blackbox_optimizers.RegressionType.RIDGE, regularizer, est_type, True,
blackbox_optimizers.UpdateMethod.NO_METHOD, [], None,
gradient_ascent_optimizer)
current_input = np.zeros(2)
step = blackbox_object.run_step(perturbations, function_values,
current_input, current_value)
Expand Down
4 changes: 3 additions & 1 deletion compiler_opt/es/es_trainer.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,9 @@
# limitations under the License.
"""Local ES trainer."""

from absl import app, flags, logging
from absl import app
from absl import flags
from absl import logging
import gin

from compiler_opt.es import es_trainer_lib
Expand Down
3 changes: 2 additions & 1 deletion compiler_opt/es/es_trainer_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@
from compiler_opt.es import gradient_ascent_optimization_algorithms
from compiler_opt.es import blackbox_learner
from compiler_opt.es import policy_utils
from compiler_opt.rl import policy_saver, corpus
from compiler_opt.rl import policy_saver
from compiler_opt.rl import corpus

POLICY_NAME = "policy"

Expand Down

0 comments on commit 9c81ac6

Please sign in to comment.