app.py
CHANGED
@@ -117,9 +117,13 @@ def generate_text_local(model_path, text):
         logger.info(f"Running local text generation with {model_path}")
         pipeline = pipelines[model_path]

-        # …
-        if …
-        …
+        # Move the model to the GPU (the whole pipeline)
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        pipeline.model = pipeline.model.to(device)
+
+        # Point the other pipeline components (tokenizer, etc.) at the GPU as well
+        if hasattr(pipeline, "device"):
+            pipeline.device = device

         # Log the device information
         device_info = next(pipeline.model.parameters()).device
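A side note on the device plumbing above: `transformers` pipelines also accept the device at construction time, which avoids mutating `pipeline.model` and `pipeline.device` on every request. A minimal sketch, assuming the `pipelines` cache is built from model paths (the `"gpt2"` model and `"text-generation"` task here are illustrative, not taken from this repo):

```python
import torch
from transformers import pipeline as build_pipeline

# Illustrative alternative: bind the device once, at construction time.
# The pipeline() factory accepts a device index: 0 = first GPU, -1 = CPU.
device = 0 if torch.cuda.is_available() else -1
pipelines = {
    "gpt2": build_pipeline("text-generation", model="gpt2", device=device),
}
```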
@@ -129,13 +133,15 @@ def generate_text_local(model_path, text):
             text,
             max_new_tokens=40,
             do_sample=False,
-            num_return_sequences=1
+            num_return_sequences=1,
+            device=device  # Specify the device explicitly
         )
-
-        # Move the model …
-        …
-        …
-        …
+
+        # Move the model back to the CPU
+        pipeline.model = pipeline.model.to("cpu")
+        if hasattr(pipeline, "device"):
+            pipeline.device = torch.device("cpu")
+
         return outputs[0]["generated_text"]
     except Exception as e:
         logger.error(f"Error in local text generation with {model_path}: {str(e)}")
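One caveat on the new `device=device` argument: `device` is not a documented call-time parameter of the text-generation pipeline's `__call__`, and unrecognized keyword arguments are typically forwarded to `model.generate()`, which is likely to reject them (behavior varies by `transformers` version). Since the model was already moved with `.to(device)` above, the keyword should be unnecessary; a safer form of the call would be:

```python
# The pipeline/model were already moved to `device` above, so the
# generation call itself needs no device argument.
outputs = pipeline(
    text,
    max_new_tokens=40,
    do_sample=False,
    num_return_sequences=1,
)
```

The same applies to the `pipeline(text, device=device)` call in `classify_text_local` below.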
@@ -162,20 +168,25 @@ def classify_text_local(model_path, text):
         logger.info(f"Running local classification with {model_path}")
         pipeline = pipelines[model_path]

-        # …
-        if …
-        …
-        …
+        # Move the model to the GPU (the whole pipeline)
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        pipeline.model = pipeline.model.to(device)
+
+        # Point the other pipeline components (tokenizer, etc.) at the GPU as well
+        if hasattr(pipeline, "device"):
+            pipeline.device = device
+
         # Log the device information
         device_info = next(pipeline.model.parameters()).device
         logger.info(f"Model {model_path} is running on device: {device_info}")

-        result = pipeline(text)
-
-        # Move the model …
-        …
-        …
-        …
+        result = pipeline(text, device=device)  # Specify the device explicitly
+
+        # Move the model back to the CPU
+        pipeline.model = pipeline.model.to("cpu")
+        if hasattr(pipeline, "device"):
+            pipeline.device = torch.device("cpu")
+
         return str(result)
     except Exception as e:
         logger.error(f"Error in local classification with {model_path}: {str(e)}")
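Both functions now repeat the same move-to-GPU / move-back-to-CPU sequence, and an exception during inference skips the move back, leaving the model on the GPU. A context manager could factor this out and make the restore unconditional; a minimal sketch under the same assumptions as the diff (a pipeline object exposing `.model` and optionally `.device`; the name `borrow_gpu` is hypothetical):

```python
import contextlib

import torch


@contextlib.contextmanager
def borrow_gpu(pipe):
    """Temporarily move a pipeline's model to the GPU, restoring the CPU on exit."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    pipe.model = pipe.model.to(device)
    if hasattr(pipe, "device"):
        pipe.device = device
    try:
        yield device
    finally:
        # Runs even if inference raised, so the model never stays on the GPU.
        pipe.model = pipe.model.to("cpu")
        if hasattr(pipe, "device"):
            pipe.device = torch.device("cpu")
```

Usage would collapse each function body to `with borrow_gpu(pipeline): outputs = pipeline(...)`, and the `finally` block guarantees the CPU restore that the inline version loses on the exception path.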