Skip to content

Commit

Permalink
fix
Browse files Browse the repository at this point in the history
  • Loading branch information
IlyasMoutawwakil committed Feb 14, 2024
1 parent c542546 commit 45de1f9
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 20 deletions.
24 changes: 7 additions & 17 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -70,33 +70,23 @@ You can run benchmarks from the Python API, using the `launch` function from the
from optimum_benchmark.logging_utils import setup_logging
from optimum_benchmark.experiment import launch, ExperimentConfig
from optimum_benchmark.backends.pytorch.config import PyTorchConfig
from optimum_benchmark.launchers.torchrun.config import TorchrunConfig
from optimum_benchmark.benchmarks.inference.config import InferenceConfig


if __name__ == "__main__":
    setup_logging(level="INFO")
    launcher_config = TorchrunConfig(nproc_per_node=2)
    benchmark_config = InferenceConfig(latency=True, memory=True)
    backend_config = PyTorchConfig(model="gpt2", device="cuda", device_ids="0,1", no_weights=True)
    experiment_config = ExperimentConfig(
        experiment_name="api-launch",
        benchmark=benchmark_config,
        launcher=launcher_config,
        backend=backend_config,
    )
    benchmark_report = launch(experiment_config)
    experiment_config.push_to_hub("IlyasMoutawwakil/benchmarks")
    benchmark_report.push_to_hub("IlyasMoutawwakil/benchmarks")
```

Yep, it's that simple! Check the supported backends, launchers and benchmarks in the [features](#features-) section.
Expand Down
20 changes: 20 additions & 0 deletions examples/api_launch.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
from optimum_benchmark.logging_utils import setup_logging
from optimum_benchmark.experiment import launch, ExperimentConfig
from optimum_benchmark.backends.pytorch.config import PyTorchConfig
from optimum_benchmark.launchers.torchrun.config import TorchrunConfig
from optimum_benchmark.benchmarks.inference.config import InferenceConfig

if __name__ == "__main__":
    # Emit INFO-level logs so benchmark progress is visible when run directly.
    setup_logging(level="INFO")

    # Launch via torchrun with two processes, targeting the two CUDA
    # devices listed in device_ids.
    launcher_cfg = TorchrunConfig(nproc_per_node=2)

    # Measure inference latency and memory for gpt2 on CUDA.
    # NOTE(review): no_weights presumably skips downloading/loading
    # pretrained weights — confirm against backend docs.
    benchmark_cfg = InferenceConfig(latency=True, memory=True)
    backend_cfg = PyTorchConfig(
        model="gpt2",
        device="cuda",
        device_ids="0,1",
        no_weights=True,
    )

    experiment_cfg = ExperimentConfig(
        experiment_name="api-launch",
        launcher=launcher_cfg,
        backend=backend_cfg,
        benchmark=benchmark_cfg,
    )

    report = launch(experiment_cfg)

    # Publish both the experiment configuration and the resulting report
    # to the same Hub repository.
    experiment_cfg.push_to_hub("IlyasMoutawwakil/benchmarks")
    report.push_to_hub("IlyasMoutawwakil/benchmarks")
5 changes: 2 additions & 3 deletions optimum_benchmark/cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,7 @@
from .backends.neural_compressor.config import INCConfig
from .backends.text_generation_inference.config import TGIConfig

from .benchmarks.report import BenchmarkReport
from .report import BenchmarkReport
from .experiment import launch, ExperimentConfig
from .benchmarks.training.config import TrainingConfig
from .benchmarks.inference.config import InferenceConfig
Expand Down Expand Up @@ -74,8 +74,7 @@ def benchmark_cli(experiment_config: DictConfig) -> None:

# Instantiate the experiment configuration and trigger its __post_init__
experiment_config: ExperimentConfig = OmegaConf.to_object(experiment_config)
OmegaConf.save(experiment_config, "experiment_config.yaml", resolve=True)
experiment_config.to_json("experiment_config.json")

benchmark_report: BenchmarkReport = launch(experiment_config=experiment_config)

benchmark_report.to_json("benchmark_report.json")

0 comments on commit 45de1f9

Please sign in to comment.