|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

"""Unified PyTorch deterministic training example for all supported models.

Deterministic metrics (loss, activation mean) are automatically stored in results
when the --enable_determinism flag is enabled.

To compare deterministic results between runs, use the `sb result diagnosis` command
with a baseline file and comparison rules. See the SuperBench documentation for details.

Example workflow:
1. Run the first benchmark (creates outputs/<timestamp>/results-summary.jsonl):
    python3 examples/benchmarks/pytorch_deterministic_example.py \
        --model resnet101 --enable_determinism --deterministic_seed 42

2. Generate a baseline from the results:
    sb result generate-baseline --data-file outputs/<timestamp>/results-summary.jsonl \
        --summary-rule-file summary-rules.yaml --output-dir outputs/<timestamp>

3. Run the second benchmark:
    python3 examples/benchmarks/pytorch_deterministic_example.py \
        --model resnet101 --enable_determinism --deterministic_seed 42

4. Compare the runs with diagnosis:
    sb result diagnosis --data-file outputs/<run2-timestamp>/results-summary.jsonl \
        --rule-file rules.yaml --baseline-file outputs/<run1-timestamp>/baseline.json

Note: CUBLAS_WORKSPACE_CONFIG is automatically set by the code when determinism is enabled.
"""
| 31 | + |
import argparse
import json
import socket
from datetime import datetime
from pathlib import Path

from superbench.benchmarks import BenchmarkRegistry, Framework
from superbench.common.utils import logger
| 39 | + |
# Models supported by this example; each key must also exist in DEFAULT_PARAMS.
MODEL_CHOICES = [
    'bert-large',
    'gpt2-small',
    'llama2-7b',
    'mixtral-8x7b',
    'resnet101',
    'lstm',
]
| 48 | + |
# Per-model default parameter strings passed to the benchmark context.
# All configs use small batch sizes and (where set) float32 precision so
# deterministic runs stay cheap; --check_frequency controls how often the
# periodic determinism fingerprints are collected.
DEFAULT_PARAMS = {
    'bert-large':
    '--batch_size 1 --seq_len 64 --num_warmup 1 --num_steps 200 --precision float32 '
    '--model_action train --check_frequency 20',
    'gpt2-small':
    '--batch_size 1 --num_steps 300 --num_warmup 1 --seq_len 128 --precision float32 '
    '--model_action train --check_frequency 20',
    'llama2-7b':
    '--batch_size 1 --num_steps 300 --num_warmup 1 --seq_len 512 --precision float32 --model_action train '
    '--check_frequency 20',
    'mixtral-8x7b':
    '--hidden_size 4096 --num_hidden_layers 32 --num_attention_heads 32 --intermediate_size 14336 '
    '--num_key_value_heads 8 --max_position_embeddings 32768 --router_aux_loss_coef 0.02 '
    '--check_frequency 20',
    'resnet101':
    '--batch_size 1 --precision float32 --num_warmup 1 --num_steps 120 --sample_count 8192 '
    '--pin_memory --model_action train --check_frequency 20',
    'lstm':
    '--batch_size 1 --num_steps 100 --num_warmup 2 --seq_len 64 --precision float32 '
    '--model_action train --check_frequency 30',
}
| 70 | + |
| 71 | + |
def _parse_args():
    """Parse command-line arguments for the determinism example.

    Returns:
        argparse.Namespace: parsed arguments (model, enable_determinism,
        deterministic_seed).
    """
    parser = argparse.ArgumentParser(description='Unified PyTorch deterministic training example.')
    parser.add_argument('--model', type=str, choices=MODEL_CHOICES, required=True, help='Model to run.')
    parser.add_argument(
        '--enable_determinism',
        action='store_true',
        help='Enable deterministic mode for reproducible results.',
    )
    parser.add_argument(
        '--deterministic_seed',
        type=int,
        default=None,
        help='Seed for deterministic training.',
    )
    return parser.parse_args()


def _build_parameters(args):
    """Build the benchmark parameter string from model defaults and CLI flags.

    Args:
        args (argparse.Namespace): parsed CLI arguments.

    Returns:
        str: space-separated parameter string for the benchmark context.
    """
    parameters = DEFAULT_PARAMS[args.model]
    if args.enable_determinism:
        parameters += ' --enable_determinism'
    # The seed is appended independently of --enable_determinism; it has no
    # effect unless determinism is enabled downstream.
    if args.deterministic_seed is not None:
        parameters += f' --deterministic_seed {args.deterministic_seed}'
    return parameters


def build_summary(benchmark_results, benchmark_name):
    """Flatten benchmark metrics into the results-summary.jsonl key format.

    Keys follow ``model-benchmarks:<category>/<benchmark>/<metric>`` so the
    output is consumable by ``sb result`` commands. Deterministic metrics get
    a ``_rank0`` suffix for compatibility with diagnosis rules.

    Args:
        benchmark_results (dict): parsed benchmark serialized result.
        benchmark_name (str): benchmark name used in the key prefix.

    Returns:
        dict: flattened metric map plus a 'node' hostname entry.
    """
    summary = {}
    prefix = f'model-benchmarks:example:determinism/{benchmark_name}'
    for metric, values in benchmark_results.get('result', {}).items():
        # Metrics are usually lists of per-step values; keep the first one.
        val = values[0] if isinstance(values, list) else values
        suffix = '_rank0' if metric.startswith('deterministic_') else ''
        summary[f'{prefix}/{metric}{suffix}'] = val
    # Node identifier so multi-host result files can be distinguished.
    summary['node'] = socket.gethostname()
    return summary


def main():
    """Run the selected model benchmark and persist results for `sb result` tooling."""
    args = _parse_args()
    parameters = _build_parameters(args)

    context = BenchmarkRegistry.create_benchmark_context(args.model, parameters=parameters, framework=Framework.PYTORCH)
    benchmark = BenchmarkRegistry.launch_benchmark(context)
    # NOTE(review): launch_benchmark can return a falsy result when the
    # context is invalid — guard before touching its attributes; confirm
    # against the BenchmarkRegistry API.
    if not benchmark:
        logger.error('Benchmark failed to launch.')
        return
    logger.info(f'Benchmark finished. Return code: {benchmark.return_code}')

    # Create a timestamped output directory for this run.
    timestamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    output_dir = Path('outputs') / timestamp
    output_dir.mkdir(parents=True, exist_ok=True)

    benchmark_results = json.loads(benchmark.serialized_result)
    benchmark_name = benchmark_results.get('name', f'pytorch-{args.model}')

    summary = build_summary(benchmark_results, benchmark_name)

    # Write results-summary.jsonl; JSONL lines must be newline-terminated.
    summary_file = output_dir / 'results-summary.jsonl'
    with open(summary_file, 'w') as f:
        f.write(json.dumps(summary) + '\n')
    logger.info(f'Results saved to {summary_file}')

    # Also save the full, unflattened results for reference.
    full_results_file = output_dir / 'results-full.json'
    with open(full_results_file, 'w') as f:
        json.dump(benchmark_results, f, indent=2)

    raw_data = benchmark_results.get('raw_data', {})
    if 'deterministic_loss' in raw_data:
        num_checkpoints = len(raw_data['deterministic_loss'][0])
        logger.info(f'Periodic fingerprints collected at {num_checkpoints} checkpoints')

    logger.info(
        f'To generate baseline: sb result generate-baseline '
        f'--data-file {summary_file} --summary-rule-file summary-rules.yaml '
        f'--output-dir {output_dir}'
    )
    logger.info('To compare results between runs, use `sb result diagnosis` command.')
| 148 | + |
# Script entry point: only run when executed directly, not when imported.
if __name__ == '__main__':
    main()