Spaces: Running
laserbeam2045 committed on
Commit · af0df21
1 Parent(s): 4a8694d
fix
Browse files
- app.py +6 -6
- requirements.txt +1 -0
app.py
CHANGED

@@ -1,7 +1,7 @@
 import os
 import torch
 from fastapi import FastAPI
-from transformers import
+from transformers import AutoTokenizer, AutoModelForCausalLM
 from pydantic import BaseModel
 import logging

@@ -12,17 +12,17 @@ logger = logging.getLogger(__name__)
 app = FastAPI()

 # Load the model
-model_name = "google/gemma-
+model_name = "google/gemma-2-2b-it"
 try:
     logger.info(f"Loading model: {model_name}")
-
+    tokenizer = AutoTokenizer.from_pretrained(model_name, token=os.getenv("HF_TOKEN"))
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
         torch_dtype=torch.bfloat16,
         device_map="auto",
         token=os.getenv("HF_TOKEN"),
         low_cpu_mem_usage=True,
-        load_in_4bit=True
+        load_in_4bit=True
     )
     logger.info("Model loaded successfully")
 except Exception as e:

@@ -37,9 +37,9 @@ class TextInput(BaseModel):
 async def generate_text(input: TextInput):
     try:
         logger.info(f"Generating text for input: {input.text}")
-        inputs =
+        inputs = tokenizer(input.text, return_tensors="pt").to("cuda" if torch.cuda.is_available() else "cpu")
         outputs = model.generate(**inputs, max_length=input.max_length)
-        result =
+        result = tokenizer.decode(outputs[0], skip_special_tokens=True)
         logger.info(f"Generated text: {result}")
         return {"generated_text": result}
     except Exception as e:
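Note on the 4-bit load: with the transformers 4.44.x pin in requirements.txt, passing load_in_4bit=True straight into from_pretrained still works but is the deprecated spelling; the recommended form is a BitsAndBytesConfig passed via quantization_config. The sketch below is not part of this commit, only an equivalent setup under that assumption, reusing the same model_name and HF_TOKEN as app.py (bitsandbytes 4-bit loading needs a CUDA GPU).

import os
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "google/gemma-2-2b-it"

# Same 4-bit setup expressed as an explicit quantization config
# (requires a CUDA-capable GPU; bitsandbytes 4-bit does not run on CPU).
quant_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

tokenizer = AutoTokenizer.from_pretrained(model_name, token=os.getenv("HF_TOKEN"))
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    device_map="auto",
    token=os.getenv("HF_TOKEN"),
    low_cpu_mem_usage=True,
    quantization_config=quant_config,
)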
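A quick way to exercise the patched tokenize → generate → decode path is a small client call. The request fields text and max_length and the response key generated_text come from the diff above; the POST /generate path and port 7860 are assumptions, since the route decorator and the Space's serving port are outside the hunks shown.

import json
import urllib.request

# Hypothetical smoke test: route path and port are assumptions;
# only the request fields (text, max_length) and the response key
# (generated_text) are taken from the diff.
payload = json.dumps({"text": "Hello, Gemma!", "max_length": 64}).encode("utf-8")
req = urllib.request.Request(
    "http://localhost:7860/generate",  # assumed route and default Spaces port
    data=payload,
    headers={"Content-Type": "application/json"},
    method="POST",
)
with urllib.request.urlopen(req) as resp:
    print(json.loads(resp.read())["generated_text"])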
requirements.txt
CHANGED

@@ -1,5 +1,6 @@
 huggingface_hub==0.25.2
 torch==2.1.0
+numpy<2.0
 transformers==4.44.2
 bitsandbytes==0.42.0
 accelerate==0.26.1
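The only requirements.txt change is the numpy<2.0 pin. The compiled wheels pinned here (torch 2.1.0 in particular) were built against the NumPy 1.x ABI, so letting NumPy 2.x resolve can break imports at startup. A tiny optional check along these lines (not part of the commit) would make a bad resolution visible in the Space logs:

import logging
import numpy

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Log the resolved NumPy version so a bad dependency resolution shows up in the logs.
logger.info(f"NumPy version: {numpy.__version__}")
if int(numpy.__version__.split(".")[0]) >= 2:
    logger.warning("NumPy 2.x detected; the pinned torch 2.1.0 wheels expect NumPy 1.x")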