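"""Command-line entry point for the UFC fight prediction pipeline.

Builds the list of models to evaluate, parses the --report option, and runs
the PredictionPipeline over the selected models.
"""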
import argparse

from .pipeline import PredictionPipeline
from .models import (
    EloBaselineModel,
    LogisticRegressionModel,
    XGBoostModel,
    SVCModel,
    RandomForestModel,
    BernoulliNBModel,
    LGBMModel
)


def main():
"""
Main entry point to run the prediction pipeline.
You can specify which models to run and the reporting format.
"""
parser = argparse.ArgumentParser(description="UFC Fight Prediction Pipeline")
parser.add_argument(
'--report',
type=str,
default='detailed',
choices=['detailed', 'summary'],
help="Type of report to generate: 'detailed' (file) or 'summary' (console)."
)
args = parser.parse_args()
# --- Define Models to Run ---
# Instantiate all the models you want to evaluate here.
models_to_run = [
EloBaselineModel(),
LogisticRegressionModel(),
XGBoostModel(),
SVCModel(),
RandomForestModel(),
BernoulliNBModel(),
LGBMModel(),
]
# --- End of Model Definition ---
pipeline = PredictionPipeline(models=models_to_run)
try:
pipeline.run(detailed_report=(args.report == 'detailed'))
except FileNotFoundError as e:
print(f"Error: {e}")
print("Please ensure the required data files exist. You may need to run the scraping and ELO analysis first.")
if __name__ == '__main__':
    main()
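
# Usage sketch (assumptions: this file is main.py inside your package and is
# run as a module so the relative imports resolve; substitute your real
# package name for <your_package>):
#
#   python -m <your_package>.main                    # detailed report written to a file (default)
#   python -m <your_package>.main --report summary   # summary printed to the console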