# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
Benchmark Runner

This is the main entry point for the benchmarking application. It reads the YAML configuration
file and orchestrates the entire benchmarking process by:
- Loading and validating benchmark configurations
- Executing benchmark scenarios
- Collecting and processing results
- Generating reports

Usage:
    python benchmark_runner.py --config config.yaml

The YAML file should contain all necessary configuration parameters for the benchmarks.
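
An illustrative config sketch (not a complete schema): the recipe name, shape values,
and output_dir below are placeholders, only the keys read by this runner are shown,
and any other per-model keys are passed through to BenchmarkConfig as params.

    benchmark_mode: "inference"
    output_dir: "benchmarks/microbenchmarks/results"
    quantization_config_recipe_names:
      - "baseline"
    model_params:
      - matrix_shapes:
          - name: "custom"
            shapes: [[1024, 1024, 1024]]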
"""

import argparse
from itertools import product
from typing import Any, Dict, List, Tuple

import yaml

from benchmarks.microbenchmarks.utils import (
    BenchmarkConfig,
    generate_results_csv,
    print_results,
)


def get_shapes_for_config(
    shape_configs: List[Dict[str, Any]],
) -> List[Tuple[str, List[int]]]:
    """Get shapes for a given configuration.

    Args:
        shape_configs: List of shape configurations from YAML

    Returns:
        List of tuples containing (shape_name, shape)
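
    Example (illustrative shape values only):
        >>> get_shapes_for_config(
        ...     [{"name": "custom", "shapes": [[1024, 1024, 1024], [2048, 4096, 1024]]}]
        ... )
        [('custom', [1024, 1024, 1024]), ('custom', [2048, 4096, 1024])]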
    """
    shapes = []
    for shape_config in shape_configs:
        name = shape_config["name"]
        if name == "custom":
            shapes.extend([(name, shape) for shape in shape_config["shapes"]])
        else:
            raise NotImplementedError(
                f"Shape config {name} not supported. Currently only supports custom shapes."
            )
    return shapes


def get_param_combinations(model_param):
    """Extract all parameter combinations from a model config"""
    # Get all shapes
    shapes = get_shapes_for_config(model_param["matrix_shapes"])

    # Extract all other parameters (excluding matrix_shapes)
    base_params = {
        key: value for key, value in model_param.items() if key not in ["matrix_shapes"]
    }

    return shapes, base_params


def load_benchmark_configs(cli_args: argparse.Namespace) -> List[BenchmarkConfig]:
    """Load benchmark configurations from CLI arguments and YAML file."""
    with open(cli_args.config, "r") as f:
        config = yaml.safe_load(f)

    output_dir = config.get("output_dir", "benchmarks/microbenchmarks/results")
    benchmark_mode = config.get("benchmark_mode", "inference")

    # Create all possible combinations
    configs = []
    for model_param in config["model_params"]:
        shapes, params = get_param_combinations(model_param)

        # Create configs for all combinations
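        # Illustrative count (hypothetical numbers): a config listing 2
        # quantization_config_recipe_names and 3 custom shapes yields
        # 2 * 3 = 6 BenchmarkConfig objects for this model_param entry.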
        for quant_config, (shape_name, shape) in product(
            config.get("quantization_config_recipe_names", ["baseline"]), shapes
        ):
            configs.append(
                BenchmarkConfig(
                    quantization=quant_config,
                    params=params,
                    shape_name=shape_name,
                    shape=shape,
                    output_dir=output_dir,
                    benchmark_mode=benchmark_mode,
                )
            )

    return configs


def run_inference_benchmarks_from_config(configs: List[BenchmarkConfig]) -> None:
    """Run benchmarks using configurations from YAML file"""
    from benchmarks.microbenchmarks.benchmark_inference import run as run_inference

    results = []
    print("Benchmarking Inference ......")
    for config in configs:
        try:
            print(f"Running: {config.name}")
            result = run_inference(config)  # Pass the config object directly
            results.append(result)
        except Exception as e:
            print(f"Error running benchmark {config.name}: {e}")
            continue

    # Add results to csv
    generate_results_csv(results, configs[0].output_dir)

    # Print results
    print_results(results)

    # TODO: Process results: Speedups:
    # 1. For different shapes for same model and quantization
    # 2. For different quantizations for same model and shape
    # 3. For different models for same quantization


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Run benchmarks from config file")
    parser.add_argument(
        "--config",
        type=str,
        required=True,
        help="Path to benchmark configuration file",
    )
    # TODO: Add support for args to override config values and run smaller benchmarks
    args = parser.parse_args()

    configs = load_benchmark_configs(cli_args=args)
    # Run benchmarks
    if configs[0].benchmark_mode == "inference":
        run_inference_benchmarks_from_config(configs)
    elif configs[0].benchmark_mode == "training":
        print("Training mode not implemented yet")
    else:
        raise ValueError(
            f"Invalid benchmark mode: {configs[0].benchmark_mode}, choose from inference or training"
        )