
Commit

Pass lint
jaywonchung committed Aug 2, 2023
1 parent 4f8bf0b commit 9c6dfac
Showing 3 changed files with 10 additions and 13 deletions.
4 changes: 1 addition & 3 deletions zeus/policy/optimizer.py
@@ -137,9 +137,7 @@ def observe(
         # because sampling from an infinite precision Gaussian distribution
         # always returns the mean (the observation), and it will hamper
         # exploration in the early stage.
-        precision = (
-            np.inf if variance == 0.0 else np.reciprocal(variance)
-        )  # ruff: noqa: PLR2004
+        precision = np.inf if variance == 0.0 else np.reciprocal(variance)
         mab = self.mabs[job]
         mab.arm_reward_prec[batch_size] = precision
         mab.fit_arm(batch_size, arm_rewards, reset=True)
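For readers skimming this hunk: the arm's reward precision is the reciprocal of the observed variance, with a zero-variance observation mapped to infinite precision. A minimal standalone sketch of that relationship and of the sampling behavior the comment above refers to (illustrative only, not the actual zeus.policy.optimizer code):

import numpy as np

def reward_precision(variance: float) -> float:
    # Precision is 1 / variance; guard the zero-variance case explicitly
    # instead of triggering a division-by-zero warning.
    return np.inf if variance == 0.0 else np.reciprocal(variance)

# Sampling from a Gaussian with infinite precision (zero variance)
# always returns the mean, which is why the comment above warns it
# would hamper exploration in the early stage.
rng = np.random.default_rng(0)
prec = reward_precision(0.01)            # finite precision -> stddev 0.1
sample = rng.normal(0.5, np.sqrt(1.0 / prec))
degenerate = rng.normal(0.5, 0.0)        # zero stddev always yields 0.5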
17 changes: 8 additions & 9 deletions zeus/run/dataloader.py
@@ -32,7 +32,7 @@
 from torch.utils.data.distributed import DistributedSampler
 
 from zeus.monitor import ZeusMonitor, Measurement
-from zeus.util.check import get_env
+from zeus.util.env import get_env
 from zeus.util.metric import ZeusCostThresholdExceededError, zeus_cost
 from zeus.util.logging import get_logger

@@ -957,14 +957,13 @@ def __iter__(self):
         # Push profiling window for the current epoch.
         # Note that both train and eval dataloaders will push one profiling window *separately*.
         self._begin_measurement("__ZeusDataLoader_epoch")
-        # The power limit of the GPU is only changed by the train dataloader.
-        if self._is_train:  # ruff: noqa: SIM102
-            # If we're not profiling, use the steady state power limit.
-            # If we are profiling, the power limit will be set in __next__ with warmup.
-            # Power limit result is already loaded in when initializing the train dataloader,
-            # so we just set the power limit directly.
-            if not self._should_profile:
-                self._set_gpu_steady_power_limit()
+        # The power limit of the GPU is only changed by the train dataloader (`self._is_train`).
+        # If we're not profiling, use the steady state power limit (`self._should_profile`).
+        # If we are profiling, the power limit will be set in __next__ with warmup.
+        # Power limit result is already loaded in when initializing the train dataloader,
+        # so we just set the power limit directly.
+        if self._is_train and not self._should_profile:
+            self._set_gpu_steady_power_limit()

return self

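The second hunk fixes ruff's SIM102 (collapsible nested if) by merging the two conditions rather than suppressing the rule. A generic before/after sketch of the pattern, with placeholder names rather than the real ZeusDataLoader attributes:

# Before: nested ifs, which SIM102 flags as collapsible.
def should_set_limit_nested(is_train: bool, should_profile: bool) -> bool:
    if is_train:
        if not should_profile:
            return True
    return False

# After: one combined condition with identical behavior, no noqa needed.
def should_set_limit_flat(is_train: bool, should_profile: bool) -> bool:
    return is_train and not should_profile

assert all(
    should_set_limit_nested(a, b) == should_set_limit_flat(a, b)
    for a in (True, False) for b in (True, False)
)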
2 changes: 1 addition & 1 deletion zeus/run/master.py
@@ -271,7 +271,7 @@ def run(
raise ValueError("You must provide a default batch size for the job.")
if job.command is None:
raise ValueError("You must provide a command format string for the job.")
if eta_knob < 0.0 or eta_knob > 1.0: # ruff: noqa: PLR2004
if eta_knob < 0.0 or eta_knob > 1.0:
raise ValueError("eta_knob must be in [0.0, 1.0].")

print(f"[Zeus Master] {job} x {num_recurrence}")
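ruff's PLR2004 flags magic-value comparisons such as the 0.0 and 1.0 here; this hunk drops the suppression comment and keeps the plain range check. For context on why the range matters: eta_knob weighs energy against time in the zeus_cost metric imported in dataloader.py above. A hedged sketch of that metric as described in the Zeus paper; the actual zeus.util.metric.zeus_cost signature is an assumption here:

def zeus_cost(energy: float, time: float, eta_knob: float, max_power: float) -> float:
    # eta_knob = 1.0 optimizes purely for energy (J), 0.0 purely for time (s);
    # max_power (W) scales time into energy units so the two terms are comparable.
    # Formula and signature follow the paper's cost definition, not verified
    # against zeus.util.metric.
    if eta_knob < 0.0 or eta_knob > 1.0:
        raise ValueError("eta_knob must be in [0.0, 1.0].")
    return eta_knob * energy + (1 - eta_knob) * max_power * time

# Example: 300 J and 2 s on a 250 W GPU, weighted evenly.
print(zeus_cost(energy=300.0, time=2.0, eta_knob=0.5, max_power=250.0))  # 400.0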
