Spaces:
Runtime error
Runtime error
File size: 3,582 Bytes
0218c20 03991d8 0218c20 29331bd 6a547e4 29331bd 2ba12d8 6f6ae2a 2ba12d8 29331bd 2ba12d8 62eaea3 0218c20 2ba12d8 6a547e4 0218c20 03991d8 2ba12d8 03991d8 2ba12d8 03991d8 0218c20 03991d8 0a24068 0218c20 62eaea3 03991d8 62eaea3 2ba12d8 62eaea3 0218c20 62eaea3 0218c20 62eaea3 2ba12d8 6a547e4 62eaea3 03991d8 62eaea3 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 |
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import StreamingResponse
import torch
import os
import asyncio

# Configure the Hugging Face cache location BEFORE importing transformers:
# the library resolves its cache directories from these environment variables
# at import time, so setting them after the import (as the code previously
# did) has no effect and the default, possibly non-writable, cache is used.
cache_dir = "/tmp/hf_home"
os.environ["HF_HOME"] = cache_dir
os.environ["TRANSFORMERS_CACHE"] = cache_dir
os.environ["HUGGINGFACE_HUB_CACHE"] = cache_dir
os.makedirs(cache_dir, exist_ok=True)
os.chmod(cache_dir, 0o777)  # Spaces containers may run as an arbitrary UID

from transformers import AutoTokenizer, AutoModelForCausalLM

# Load model and tokenizer (DialoGPT-small: a small conversational GPT-2
# variant; downloads on first run, then served from cache_dir).
model_name = "microsoft/DialoGPT-small"
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir=cache_dir)
model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir=cache_dir)
# Prefer GPU when available; otherwise run on CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
# FastAPI application with fully permissive CORS so any front-end origin
# (e.g. the sales site embedding this chatbot) can reach the API.
app = FastAPI()

_cors_options = {
    "allow_origins": ["*"],
    "allow_credentials": True,
    "allow_methods": ["*"],
    "allow_headers": ["*"],
}
app.add_middleware(CORSMiddleware, **_cors_options)
class Question(BaseModel):
    """Request body for POST /ask: a single free-text user question."""
    question: str
# Persona/instruction text prepended to every user turn before tokenization
# (see generate_response_chunks below).
SYSTEM_PROMPT = "You are a helpful, professional, and highly persuasive sales assistant for a premium web development and AI service website. Your tone is friendly, respectful, and high-end, making users feel valued. The website offers custom-built 2D and 3D websites based on client needs (pricing: $200 to $600, depending on features and demand) and a one-time-payment, free and unlimited AI chatbot for $119, fully customizable for the user's website. Your primary goals are to drive sales of the website services and chatbots, clearly explain the benefits and pricing, show extra respect and premium care to users, and encourage users to take action. Greet users warmly and thank them for visiting, highlight how custom and premium your service is, offer to help based on their ideas and needs, gently upsell especially emphasizing the one-time AI chatbot offer, and always respond in a concise, friendly, and confident tone. Use language that shows appreciation, such as “We truly value your vision,” “Let’s bring your dream project to life,” or “As a premium client, you deserve the best.” Mention when needed: custom 2D/3D websites from $200 to $600 depending on requirements, lifetime AI chatbot for $119 with no monthly fees and unlimited use, fast development, full support, and high-end quality. Never say “I don’t know,” “That’s not possible,” or “Sorry.” Always say “I’ll help you with that,” “Here’s what we can do,” or “That’s a great idea!”"
# Token-id tensor of the accumulated conversation, reused across turns.
# NOTE(review): module-level — shared by ALL clients of this server; verify
# whether per-session history is actually intended.
chat_history_ids = None  # for continuous conversation
async def generate_response_chunks(prompt: str):
    """Generate the model's reply to *prompt* and yield it word by word.

    Each yielded chunk is a word followed by a space, paced with a short
    sleep so the client receives a streaming "typing" effect.

    Fixes over the previous version:
    - `model.generate` is synchronous and compute-bound; it now runs in a
      worker thread so it no longer blocks the event loop for other requests.
    - The conversation history is truncated to the model's context window
      (DialoGPT/GPT-2: 1024 positions); previously it grew without bound and
      generation would eventually fail.
    - Inference runs under `torch.no_grad()` to avoid allocating gradients.
    """
    global chat_history_ids
    new_input_ids = tokenizer.encode(
        SYSTEM_PROMPT + " User: " + prompt + " Bot:", return_tensors='pt'
    ).to(device)
    if chat_history_ids is not None:
        input_ids = torch.cat([chat_history_ids, new_input_ids], dim=-1)
    else:
        input_ids = new_input_ids

    # Keep the most recent tokens only, leaving room for the 200 new tokens,
    # so the total sequence never exceeds the model's context window.
    max_context = getattr(model.config, "n_positions", 1024) - 200
    if input_ids.shape[-1] > max_context:
        input_ids = input_ids[:, -max_context:]

    def _generate():
        # Runs in a worker thread; no gradients needed for inference.
        with torch.no_grad():
            return model.generate(
                input_ids,
                max_new_tokens=200,
                do_sample=True,
                top_p=0.9,
                temperature=0.7,
                pad_token_id=tokenizer.eos_token_id,
            )

    loop = asyncio.get_event_loop()
    output_ids = await loop.run_in_executor(None, _generate)

    # NOTE(review): history is a module-level global shared by ALL clients —
    # concurrent users' conversations will interleave; consider per-session
    # state keyed by a client id.
    chat_history_ids = output_ids  # update history

    # Decode only the newly generated tokens (everything past the input).
    response = tokenizer.decode(
        output_ids[:, input_ids.shape[-1]:][0], skip_special_tokens=True
    )
    for word in response.split():
        yield word + " "
        await asyncio.sleep(0.03)
@app.post("/ask")
async def ask(question: Question):
    """Stream the assistant's answer to the posted question as plain text."""
    word_stream = generate_response_chunks(question.question)
    return StreamingResponse(word_stream, media_type="text/plain")
|