UI changes push

#1
by adv1102 - opened
Files changed (5)
  1. .gitignore +0 -1
  2. README.md +6 -76
  3. dockerfile +0 -28
  4. main.py +0 -168
  5. requirements.txt +1 -10
.gitignore DELETED
@@ -1 +0,0 @@
- .env
 
 
README.md CHANGED
@@ -1,84 +1,14 @@
  ---
- title: Gradio Chatbot
- emoji: πŸš€
+ title: SLM Chatbot
+ emoji: πŸ’¬
  colorFrom: yellow
  colorTo: purple
  sdk: gradio
  sdk_version: 5.0.1
  app_file: app.py
- pinned: true
- short_description: Chatbot
+ pinned: false
+ license: mit
+ short_description: A web UI to chat with SLMs via the HF Inference API.
  ---

-
- # Gradio Chatbot : HuggingFace SLMs
-
- A modular Gradio-based application for interacting with various small language models through the Hugging Face API.
-
- ## Project Structure
-
- ```
- slm-poc/
- β”œβ”€β”€ main.py # Main application entry point
- β”œβ”€β”€ modules/
- β”‚   β”œβ”€β”€ __init__.py # Package initialization
- β”‚   β”œβ”€β”€ config.py # Configuration settings and constants
- β”‚   β”œβ”€β”€ document_processor.py # Document handling and processing
- β”‚   └── model_handler.py # Model interaction and response generation
- β”œβ”€β”€ Dockerfile # Docker configuration
- β”œβ”€β”€ requirements.txt # Python dependencies
- └── README.md # Project documentation
- ```
-
- ## Features
-
- - Interactive chat interface with multiple language model options
- - Document processing (PDF, DOCX, TXT) for question answering
- - Adjustable model parameters (temperature, top_p, max_length)
- - Streaming responses for better user experience
- - Docker support for easy deployment
-
- ## Setup and Running
-
- ### Local Development
-
- 1. Clone the repository
- 2. Install dependencies:
- ```
- pip install -r requirements.txt
- ```
- 3. Create a `.env` file with your HuggingFace API token:
- ```
- HF_TOKEN=hf_your_token_here
- ```
- 4. Run the application:
- ```
- python main.py
- ```
-
- ### Docker Deployment
-
- 1. Build the Docker image:
- ```
- docker build -t slm-poc .
- ```
- 2. Run the container:
- ```
- docker run -p 7860:7860 -e HF_TOKEN=hf_your_token_here slm-poc
- ```
-
- ## Usage
-
- 1. Access the web interface at http://localhost:7860
- 2. Enter your HuggingFace API token if not provided via environment variables
- 3. Select your preferred model and adjust parameters
- 4. Start chatting with the model
- 5. Optionally upload documents for document-based Q&A
-
- ## Supported Models
-
- T2T Inference models provided by Hugging Face via the Inference API
-
- ## License
-
- This project is licensed under the MIT License - see the LICENSE file for details.
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
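For context, the new README points to `app.py` (not included in this diff) as the Space entry point. Below is a minimal sketch, under stated assumptions, of what such an app typically looks like with Gradio's `ChatInterface` and `huggingface_hub.InferenceClient`; the model ID and generation parameters are illustrative placeholders, not taken from this repository.

```python
# Hypothetical app.py sketch; NOT part of this diff. Assumes HF_TOKEN is set
# in the environment and that the chosen model (a placeholder here) is served
# through the Hugging Face Inference API.
import os

import gradio as gr
from huggingface_hub import InferenceClient

client = InferenceClient(token=os.getenv("HF_TOKEN"))

def respond(message, history):
    # With type="messages", `history` is a list of {"role", "content"} dicts;
    # rebuild it to keep only the fields the chat completion endpoint expects.
    messages = [{"role": m["role"], "content": m["content"]} for m in history]
    messages.append({"role": "user", "content": message})

    # Stream the reply so the UI updates as tokens arrive.
    partial = ""
    for chunk in client.chat_completion(
        messages,
        model="HuggingFaceH4/zephyr-7b-beta",  # placeholder model ID
        max_tokens=512,
        stream=True,
    ):
        partial += chunk.choices[0].delta.content or ""
        yield partial

demo = gr.ChatInterface(respond, type="messages", title="SLM Chatbot")

if __name__ == "__main__":
    demo.launch()
```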
 
dockerfile DELETED
@@ -1,28 +0,0 @@
- FROM python:3.10-slim
-
- WORKDIR /app
-
- # Install system dependencies
- RUN apt-get update && apt-get install -y \
-     build-essential \
-     python3-dev \
-     && rm -rf /var/lib/apt/lists/*
-
- # Copy requirements first for better caching
- COPY requirements.txt .
- RUN pip install --no-cache-dir -r requirements.txt
-
- # Copy application code
- COPY main.py .
- COPY modules/ ./modules/
-
- # Environment variables
- ENV HF_TOKEN=""
- ENV GRADIO_SERVER_NAME="0.0.0.0"
- ENV GRADIO_SERVER_PORT=7860
-
- # Run the application
- CMD ["python", "main.py"]
-
- # Expose port for the application
- EXPOSE 7860
 
main.py DELETED
@@ -1,168 +0,0 @@
- # main.py
- import os
- import gradio as gr
- import tempfile
- from dotenv import load_dotenv
- from modules.document_processor import process_document
- from modules.model_handler import get_model_response, get_qa_response
- from modules.config import MODEL_MAPPING, DEFAULT_PARAMETERS
-
- # Load environment variables
- load_dotenv()
-
- def setup_api_key(api_key=None):
-     """Set up the HuggingFace API key from input or environment variables."""
-     if api_key and api_key.startswith('hf_'):
-         os.environ['HF_TOKEN'] = api_key
-         return True, "API key set successfully! βœ…"
-     elif os.getenv('HF_TOKEN') and os.getenv('HF_TOKEN').startswith('hf_'):
-         return True, "API key already available! βœ…"
-     else:
-         return False, "Please enter a valid HuggingFace API key. ⚠️"
-
- def create_chat_interface():
-     """Create the main chat interface for the application."""
-     with gr.Blocks(title="πŸ’¬ Small Language Models - POC") as demo:
-         # Application header
-         gr.Markdown("# πŸ’¬ Small Language Models - POC")
-         gr.Markdown("This chatbot uses various Language Models such as Llama 3.2, Gemma 2, Gemma 3, Phi 3.5, DeepSeek-V3, and DeepSeek-R1.")
-
-         with gr.Row():
-             with gr.Column(scale=1):
-                 # Sidebar configuration
-                 with gr.Group():
-                     api_key_input = gr.Textbox(
-                         label="HuggingFace API Token",
-                         placeholder="Enter your HF API token (hf_...)",
-                         type="password"
-                     )
-                     api_key_status = gr.Markdown("Please enter your API key.")
-                     api_key_button = gr.Button("Set API Key")
-
-                 with gr.Group():
-                     gr.Markdown("## Models and Parameters")
-                     model_dropdown = gr.Dropdown(
-                         choices=list(MODEL_MAPPING.keys()),
-                         label="Select Model",
-                         value=list(MODEL_MAPPING.keys())[0]
-                     )
-
-                     temperature_slider = gr.Slider(
-                         label="Temperature",
-                         minimum=0.01,
-                         maximum=1.0,
-                         value=DEFAULT_PARAMETERS["temperature"],
-                         step=0.01
-                     )
-
-                     top_p_slider = gr.Slider(
-                         label="Top P",
-                         minimum=0.01,
-                         maximum=1.0,
-                         value=DEFAULT_PARAMETERS["top_p"],
-                         step=0.01
-                     )
-
-                     max_length_slider = gr.Slider(
-                         label="Max Length",
-                         minimum=20,
-                         maximum=2040,
-                         value=DEFAULT_PARAMETERS["max_length"],
-                         step=5
-                     )
-
-                     clear_button = gr.Button("Clear Chat History")
-
-                 with gr.Group():
-                     gr.Markdown("## Document Upload")
-                     file_upload = gr.File(
-                         label="Upload Document (PDF, DOCX, TXT)",
-                         file_types=["pdf", "docx", "txt"]
-                     )
-                     upload_status = gr.Markdown("")
-
-             with gr.Column(scale=2):
-                 # Chat interface
-                 chatbot = gr.Chatbot(
-                     label="Conversation",
-                     height=500,
-                     bubble_full_width=False
-                 )
-                 msg = gr.Textbox(
-                     label="Enter your message",
-                     placeholder="Type your message here...",
-                     show_label=False
-                 )
-
-         # State variables to track conversation and document processing
-         conversation_state = gr.State([])
-         document_store = gr.State(None)
-         api_key_state = gr.State(False)
-
-         # Set up event handlers
-         api_key_button.click(
-             setup_api_key,
-             inputs=[api_key_input],
-             outputs=[api_key_state, api_key_status]
-         )
-
-         file_upload.upload(
-             process_document,
-             inputs=[file_upload, api_key_state],
-             outputs=[document_store, upload_status]
-         )
-
-         # Function to handle chat messages
-         def respond(message, conversation, model_name, temp, top_p, max_len, doc_store, api_ready):
-             if not api_ready:
-                 return conversation, conversation, "Please set a valid API key first. ⚠️"
-
-             if not message.strip():
-                 return conversation, conversation, upload_status.value
-
-             # Update conversation with user message
-             conversation.append([message, None])
-             yield conversation, conversation, upload_status.value
-
-             # Generate response based on whether document is uploaded
-             if doc_store is not None:
-                 response = get_qa_response(
-                     message,
-                     model_name,
-                     doc_store,
-                     {"temperature": temp, "top_p": top_p, "max_length": max_len}
-                 )
-             else:
-                 response = get_model_response(
-                     message,
-                     conversation,
-                     model_name,
-                     {"temperature": temp, "top_p": top_p, "max_length": max_len}
-                 )
-
-             # Update conversation with assistant response
-             conversation[-1][1] = response
-             yield conversation, conversation, upload_status.value
-
-         # Function to clear chat history
-         def clear_history():
-             return [], gr.update(value="Chat history cleared.")
-
-         # Connect events
-         msg.submit(
-             respond,
-             [msg, conversation_state, model_dropdown, temperature_slider, top_p_slider, max_length_slider, document_store, api_key_state],
-             [chatbot, conversation_state, upload_status]
-         )
-
-         clear_button.click(
-             clear_history,
-             outputs=[conversation_state, upload_status]
-         )
-
-     return demo
-
- if __name__ == "__main__":
-     # Create and launch the application
-     app = create_chat_interface()
-     app.launch(share=False)
 
requirements.txt CHANGED
@@ -1,10 +1 @@
- # requirements.txt
- gradio>=4.0.0
- huggingface_hub>=0.22.0
- langchain>=0.1.0
- langchain_community>=0.0.10
- faiss-cpu>=1.7.4
- python-dotenv>=1.0.0
- pypdf>=4.0.0
- docx2txt>=0.8
- sentence-transformers>=2.2.2
+ huggingface_hub==0.25.2