amine_dubs committed
Commit
517b06c
1 Parent(s): 6f3aee6
Files changed (2)
  1. backend/main.py +13 -4
  2. backend/requirements.txt +1 -0
backend/main.py CHANGED
@@ -70,11 +70,20 @@ def initialize_model():
         cache_dir="/tmp/transformers_cache"
     )
 
-    # Load the model explicitly with from_tf=True
-    print("Loading model with from_tf=True...")
+    # Check if TensorFlow is available
+    tf_available = False
+    try:
+        import tensorflow
+        tf_available = True
+        print("TensorFlow is available, will use from_tf=True")
+    except ImportError:
+        print("TensorFlow is not installed, will use default PyTorch loading")
+
+    # Load the model with appropriate settings based on TensorFlow availability
+    print(f"Loading model {'with from_tf=True' if tf_available else 'with default PyTorch settings'}...")
     model = AutoModelForSeq2SeqLM.from_pretrained(
         model_name,
-        from_tf=True, # Explicitly set from_tf=True
+        from_tf=tf_available, # Only set True if TensorFlow is available
         cache_dir="/tmp/transformers_cache"
     )
 
@@ -82,7 +91,7 @@ def initialize_model():
     print("Creating pipeline with pre-loaded model...")
     translator = pipeline(
         "text2text-generation",
-        model=model, # Use the model we loaded with from_tf=True
+        model=model,
         tokenizer=tokenizer,
         device=-1, # Use CPU for compatibility (-1) or GPU if available (0)
         max_length=512
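
For context, a minimal sketch of how the patched region of initialize_model() reads after this commit. Only the hunks above are shown in the diff, so the function signature, the source of model_name, and the call that the truncated cache_dir/")" lines belong to (assumed here to be the tokenizer load) are reconstructed assumptions, not code taken verbatim from the repository.

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline

def initialize_model(model_name):
    # Assumption: the truncated lines above the first hunk are the tokenizer load.
    tokenizer = AutoTokenizer.from_pretrained(
        model_name,
        cache_dir="/tmp/transformers_cache"
    )

    # Check if TensorFlow is available; otherwise fall back to PyTorch weights.
    tf_available = False
    try:
        import tensorflow  # noqa: F401 (imported only to probe availability)
        tf_available = True
        print("TensorFlow is available, will use from_tf=True")
    except ImportError:
        print("TensorFlow is not installed, will use default PyTorch loading")

    print(f"Loading model {'with from_tf=True' if tf_available else 'with default PyTorch settings'}...")
    model = AutoModelForSeq2SeqLM.from_pretrained(
        model_name,
        from_tf=tf_available,  # request TF-checkpoint conversion only when TF is installed
        cache_dir="/tmp/transformers_cache"
    )

    print("Creating pipeline with pre-loaded model...")
    translator = pipeline(
        "text2text-generation",
        model=model,
        tokenizer=tokenizer,
        device=-1,  # CPU (-1); use 0 for the first GPU
        max_length=512
    )
    return translator

The key change is that from_tf now tracks tf_available, so model loading no longer assumes TensorFlow is installed.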
backend/requirements.txt CHANGED
@@ -8,3 +8,4 @@ jinja2
 transformers
 torch
 sentencepiece
+tensorflow
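
After installing backend/requirements.txt, a quick optional sanity check (a sketch; the package names are taken from the file above) confirms that the new tensorflow entry, which gates the from_tf path in backend/main.py, resolves at import time:

import importlib.util

# Packages from backend/requirements.txt that backend/main.py relies on.
for pkg in ("transformers", "torch", "sentencepiece", "tensorflow"):
    status = "installed" if importlib.util.find_spec(pkg) is not None else "missing"
    print(f"{pkg}: {status}")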