from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch


class EndpointHandler:
    """Custom inference handler for Hugging Face Inference Endpoints.

    Loads the model and tokenizer once at startup and serves
    text-generation requests of the form {"inputs": "..."}.
    """

    def __init__(self, path=""):
        # Model repository on the Hugging Face Hub. Inference Endpoints also
        # pass the local checkout directory as `path`, which could be used
        # here instead to avoid re-downloading the weights.
        model_name = "niruemon/llm-swp"

        # Load the weights in float16 and let accelerate place them on the
        # available device(s).
        self.model = AutoModelForCausalLM.from_pretrained(
            model_name,
            device_map="auto",
            torch_dtype=torch.float16,
        )
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

        # The model is already placed by device_map="auto" above, so the
        # pipeline reuses it as-is; it should not be given a separate
        # device/device_map argument of its own.
        self.generator = pipeline(
            "text-generation",
            model=self.model,
            tokenizer=self.tokenizer,
        )

    def __call__(self, data):
        # Inference Endpoints deliver the JSON request body as a dict; the
        # prompt is expected under the "inputs" key.
        input_text = data.get("inputs", "")
        if not input_text:
            return {"error": "No input text provided."}

        try:
            # Generate one continuation; max_length caps the combined length
            # of the prompt and the generated text at 150 tokens.
            result = self.generator(input_text, max_length=150, num_return_sequences=1)
            generated_text = result[0]["generated_text"]
            return {"generated_text": generated_text}
        except Exception as e:
            # Return generation failures to the caller instead of letting the
            # endpoint worker crash.
            return {"error": str(e)}
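
# A minimal local smoke test (a sketch, not part of the deployed handler).
# It exercises the class the same way the Inference Endpoints runtime would:
# construct the handler once, then call it with a payload dict. The prompt
# below is an illustrative placeholder, not from the original source.
if __name__ == "__main__":
    handler = EndpointHandler()
    print(handler({"inputs": "Once upon a time"}))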