File size: 2,886 Bytes
b29b783
b873bf2
e768b29
b873bf2
b29b783
b873bf2
 
 
 
 
b29b783
b873bf2
 
e768b29
b873bf2
 
 
 
b29b783
b873bf2
 
b29b783
 
 
 
b873bf2
 
 
 
 
 
 
 
 
 
 
 
b29b783
 
b873bf2
 
 
 
 
b29b783
b873bf2
b29b783
b873bf2
 
 
b29b783
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
b873bf2
 
 
 
 
 
 
 
 
 
 
 
b29b783
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
import logging
import os

import pandas as pd
import requests
import streamlit as st
from dotenv import load_dotenv
from transformers import pipeline

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # module-level logger, shared by the helpers below

# Load environment variables from .env file
# NOTE(review): `from dotenv import load_dotenv` is missing from the imports
# above — as written this line raises NameError at import time; confirm the
# dependency is installed and imported.
load_dotenv()

# Get the Hugging Face API key from environment variables
hf_api_key = os.getenv("HUGGINGFACE_API_KEY")
if not hf_api_key:
    # Fail fast at import time: every API call below requires this key.
    raise ValueError("HUGGINGFACE_API_KEY is not set. Please provide a valid API key.")

# Hugging Face API URL
# Template with a `{model_name}` placeholder filled in per request.
hf_api_url = "https://api-inference.huggingface.co/models/{model_name}"

# Function to load and preprocess data
@st.cache_data
def load_data(file):
    """Read an uploaded CSV file into a DataFrame.

    On any read failure the error is logged, a message is shown to the
    user via Streamlit, and an empty DataFrame is returned so callers
    never have to handle ``None``.
    """
    try:
        return pd.read_csv(file)
    except Exception as exc:
        logger.error(f"Error loading CSV file: {exc}")
        st.error("There was an issue loading the file. Please try again.")
        return pd.DataFrame()  # Return an empty DataFrame in case of error

# Function to call Hugging Face API for text generation
def generate_text_from_model(model_name, text_input):
    """Call the Hugging Face Inference API and return the generated text.

    Parameters
    ----------
    model_name : str
        Model id interpolated into ``hf_api_url`` (e.g. ``"gpt2"``).
    text_input : str
        Prompt sent as the ``inputs`` payload.

    Returns
    -------
    str
        The generated text, a fallback message when the response has an
        unexpected shape, or ``""`` on a request error (the error is
        logged and surfaced to the user via ``st.error``).
    """
    headers = {"Authorization": f"Bearer {hf_api_key}"}
    data = {"inputs": text_input}

    try:
        response = requests.post(
            hf_api_url.format(model_name=model_name),
            headers=headers,
            json=data,
            timeout=30,  # don't hang the Streamlit session on a stalled request
        )
        response.raise_for_status()
        result = response.json()
        # The text-generation inference endpoint returns a list of dicts,
        # e.g. [{"generated_text": "..."}]; some endpoints return a bare
        # dict. The original `'generated_text' in result` check tested list
        # membership on the list shape, so every valid response was
        # reported as "No result" — handle both shapes explicitly.
        if isinstance(result, list) and result and "generated_text" in result[0]:
            return result[0]["generated_text"]
        if isinstance(result, dict) and "generated_text" in result:
            return result["generated_text"]
        return "No result from model. Please try again."
    except requests.exceptions.RequestException as err:
        logger.error(f"Error interacting with Hugging Face API: {err}")
        st.error(f"Error interacting with Hugging Face API: {err}")
        return ""

# Streamlit app layout
def main():
    """Render the Streamlit UI: custom styling, inputs, and the generate action."""
    # Custom CSS for the app background, button, slider, and text input.
    custom_css = """
        <style>
        .stApp {
            background-color: #F4F4F9;
        }
        .stButton>button {
            background-color: #6200EE;
            color: white;
            font-size: 18px;
        }
        .stSlider>div>div>span {
            color: #6200EE;
        }
        .stTextInput>div>div>input {
            background-color: #E0E0E0;
        }
        </style>
        """
    st.markdown(custom_css, unsafe_allow_html=True)

    # Page title and header.
    st.title("🌟 **Hugging Face Text Generation** 🌟")
    st.markdown("### **Generate text using Hugging Face Models**")

    # Model choice and prompt from the user.
    selected_model = st.selectbox("πŸ”Ή Select Hugging Face Model", ["gpt2", "distilgpt2", "t5-small"])
    prompt = st.text_area("πŸ”Ή Input Text", "Once upon a time...")

    # On click, call the API and display the result.
    if st.button("πŸ” Generate Text"):
        st.subheader("πŸ”” **Generated Text** πŸ””")
        output = generate_text_from_model(selected_model, prompt)
        st.write(f"πŸ“œ {output}")

# Script entry point: render the app when executed (e.g. via `streamlit run`).
if __name__ == "__main__":
    main()