# chatLab1 / app.py
# Source: Hugging Face Space by peterciank — "Update app.py", commit dab1ed6 (verified), 767 bytes.
import os
import requests
import gradio as gr
# Define function to query the AI model
def query(payload):
    """POST *payload* to the HF Inference API and return the decoded JSON.

    Reads the API token from the HF_TOKEN environment variable; note that an
    unset variable yields the header "Bearer None", which the API rejects.

    Args:
        payload: JSON-serializable dict, e.g.
            {"inputs": "...", "parameters": {...}}.

    Returns:
        The decoded JSON response: a list of result dicts on success, or an
        {"error": ...} dict while the model is loading / on failure.

    Raises:
        requests.Timeout: if the API does not respond within 30 seconds.
    """
    token = os.getenv("HF_TOKEN", None)
    headers = {"Authorization": f"Bearer {token}"}
    API_URL = "https://api-inference.huggingface.co/models/facebook/bart-large-cnn"
    # timeout keeps a stalled API call from hanging the Gradio worker forever
    response = requests.post(API_URL, headers=headers, json=payload, timeout=30)
    return response.json()
# Create Gradio interface
def chat(input_text):
    """Send *input_text* to the model and return its text output.

    facebook/bart-large-cnn is a summarization model, so on success the
    Inference API returns [{"summary_text": "..."}] — the original code
    read 'generated_text', which raised KeyError on every successful call.
    While the model is loading (or on auth failure) the API instead returns
    a single {"error": "..."} dict, which is surfaced as a readable message
    rather than crashing the handler.
    """
    data = query({"inputs": input_text, "parameters": {"do_sample": False}})
    # Error responses are a dict, not a list of results.
    if isinstance(data, dict) and "error" in data:
        return f"Error: {data['error']}"
    result = data[0]
    # Prefer the summarization key; fall back to 'generated_text' so a
    # switch to a text-generation model keeps working.
    return result.get("summary_text") or result.get("generated_text", "")
# Build the Gradio UI: a multi-line input box feeding the chat() handler,
# whose reply is shown in a second, read-only-style text box.
user_box = gr.Textbox(lines=7, label="Input Text")
reply_box = gr.Textbox(label="Model Response")
demo = gr.Interface(
    fn=chat,
    inputs=user_box,
    outputs=reply_box,
    title="AI Chat",
    description="Chat with AI model",
)
demo.launch()