openfree committed on
Commit
58498f0
·
verified ·
1 Parent(s): a919ef9

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +292 -0
app.py ADDED
@@ -0,0 +1,292 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ 나의 앱
3
+ 멋지지
4
+ Generated by MOUSE Workflow
5
+ """
6
+
7
+ import os
8
+ import json
9
+ import gradio as gr
10
+ import requests
11
+
12
+ # Workflow configuration
13
+ WORKFLOW_DATA = {
14
+ "nodes": [
15
+ {
16
+ "id": "input_1",
17
+ "type": "ChatInput",
18
+ "position": {
19
+ "x": 100,
20
+ "y": 200
21
+ },
22
+ "data": {
23
+ "label": "User Question",
24
+ "template": {
25
+ "input_value": {
26
+ "value": "What is the capital of Korea?"
27
+ }
28
+ }
29
+ }
30
+ },
31
+ {
32
+ "id": "llm_1",
33
+ "type": "llmNode",
34
+ "position": {
35
+ "x": 400,
36
+ "y": 200
37
+ },
38
+ "data": {
39
+ "label": "AI Processing",
40
+ "template": {
41
+ "provider": {
42
+ "value": "VIDraft"
43
+ },
44
+ "model": {
45
+ "value": "Gemma-3-r1984-27B"
46
+ },
47
+ "temperature": {
48
+ "value": 0.7
49
+ },
50
+ "system_prompt": {
51
+ "value": "You are a helpful assistant."
52
+ }
53
+ }
54
+ }
55
+ },
56
+ {
57
+ "id": "output_1",
58
+ "type": "ChatOutput",
59
+ "position": {
60
+ "x": 700,
61
+ "y": 200
62
+ },
63
+ "data": {
64
+ "label": "Answer"
65
+ }
66
+ }
67
+ ],
68
+ "edges": [
69
+ {
70
+ "id": "e1",
71
+ "source": "input_1",
72
+ "target": "llm_1"
73
+ },
74
+ {
75
+ "id": "e2",
76
+ "source": "llm_1",
77
+ "target": "output_1"
78
+ }
79
+ ]
80
+ }
81
+
82
+ def execute_workflow(*input_values):
83
+ """Execute the workflow with given inputs"""
84
+
85
+ # API keys from environment
86
+ vidraft_token = os.getenv("FRIENDLI_TOKEN")
87
+ openai_key = os.getenv("OPENAI_API_KEY")
88
+
89
+ nodes = WORKFLOW_DATA.get("nodes", [])
90
+ edges = WORKFLOW_DATA.get("edges", [])
91
+
92
+ results = {}
93
+
94
+ # Get input nodes
95
+ input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
96
+
97
+ # Map inputs to node IDs
98
+ for i, node in enumerate(input_nodes):
99
+ if i < len(input_values):
100
+ results[node["id"]] = input_values[i]
101
+
102
+ # Process nodes
103
+ for node in nodes:
104
+ node_id = node.get("id")
105
+ node_type = node.get("type", "")
106
+ node_data = node.get("data", {})
107
+ template = node_data.get("template", {})
108
+
109
+ if node_type == "textNode":
110
+ # Combine connected inputs
111
+ base_text = template.get("text", {}).get("value", "")
112
+ connected_inputs = []
113
+
114
+ for edge in edges:
115
+ if edge.get("target") == node_id:
116
+ source_id = edge.get("source")
117
+ if source_id in results:
118
+ connected_inputs.append(f"{source_id}: {results[source_id]}")
119
+
120
+ if connected_inputs:
121
+ results[node_id] = f"{base_text}\n\nInputs:\n" + "\n".join(connected_inputs)
122
+ else:
123
+ results[node_id] = base_text
124
+
125
+ elif node_type in ["llmNode", "OpenAIModel", "ChatModel"]:
126
+ # Get provider and model - VIDraft as default
127
+ provider = template.get("provider", {}).get("value", "VIDraft")
128
+ if provider not in ["VIDraft", "OpenAI"]:
129
+ provider = "VIDraft" # Default to VIDraft
130
+ temperature = template.get("temperature", {}).get("value", 0.7)
131
+ system_prompt = template.get("system_prompt", {}).get("value", "")
132
+
133
+ # Get input text
134
+ input_text = ""
135
+ for edge in edges:
136
+ if edge.get("target") == node_id:
137
+ source_id = edge.get("source")
138
+ if source_id in results:
139
+ input_text = results[source_id]
140
+ break
141
+
142
+ # Call API
143
+ if provider == "OpenAI" and openai_key:
144
+ try:
145
+ from openai import OpenAI
146
+ client = OpenAI(api_key=openai_key)
147
+
148
+ messages = []
149
+ if system_prompt:
150
+ messages.append({"role": "system", "content": system_prompt})
151
+ messages.append({"role": "user", "content": input_text})
152
+
153
+ response = client.chat.completions.create(
154
+ model="gpt-4.1-mini",
155
+ messages=messages,
156
+ temperature=temperature,
157
+ max_tokens=1000
158
+ )
159
+
160
+ results[node_id] = response.choices[0].message.content
161
+ except Exception as e:
162
+ results[node_id] = f"[OpenAI Error: {str(e)}]"
163
+
164
+ elif provider == "VIDraft" and vidraft_token:
165
+ try:
166
+ headers = {
167
+ "Authorization": f"Bearer {vidraft_token}",
168
+ "Content-Type": "application/json"
169
+ }
170
+
171
+ messages = []
172
+ if system_prompt:
173
+ messages.append({"role": "system", "content": system_prompt})
174
+ messages.append({"role": "user", "content": input_text})
175
+
176
+ payload = {
177
+ "model": "dep89a2fld32mcm",
178
+ "messages": messages,
179
+ "max_tokens": 16384,
180
+ "temperature": temperature,
181
+ "top_p": 0.8,
182
+ "stream": False
183
+ }
184
+
185
+ response = requests.post(
186
+ "https://api.friendli.ai/dedicated/v1/chat/completions",
187
+ headers=headers,
188
+ json=payload,
189
+ timeout=30
190
+ )
191
+
192
+ if response.status_code == 200:
193
+ results[node_id] = response.json()["choices"][0]["message"]["content"]
194
+ else:
195
+ results[node_id] = f"[VIDraft Error: {response.status_code}]"
196
+ except Exception as e:
197
+ results[node_id] = f"[VIDraft Error: {str(e)}]"
198
+ else:
199
+ # Show which API key is missing
200
+ if provider == "OpenAI":
201
+ results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]"
202
+ elif provider == "VIDraft":
203
+ results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
204
+ else:
205
+ results[node_id] = f"[No API key found for {provider}. Using simulated response: {input_text[:50]}...]"
206
+
207
+ elif node_type in ["ChatOutput", "textOutput", "Output"]:
208
+ # Get connected result
209
+ for edge in edges:
210
+ if edge.get("target") == node_id:
211
+ source_id = edge.get("source")
212
+ if source_id in results:
213
+ results[node_id] = results[source_id]
214
+ break
215
+
216
+ # Return outputs
217
+ output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
218
+ return [results.get(n["id"], "") for n in output_nodes]
219
+
220
+ # Build UI
221
+ with gr.Blocks(title="나의 앱", theme=gr.themes.Soft()) as demo:
222
+ gr.Markdown("# 나의 앱")
223
+ gr.Markdown("멋지지")
224
+
225
+ # API Status Check
226
+ vidraft_token = os.getenv("FRIENDLI_TOKEN")
227
+ openai_key = os.getenv("OPENAI_API_KEY")
228
+
229
+ with gr.Accordion("🔑 API Status", open=False):
230
+ if vidraft_token:
231
+ gr.Markdown("✅ **VIDraft API**: Connected (Gemma-3-r1984-27B)")
232
+ else:
233
+ gr.Markdown("❌ **VIDraft API**: Not configured")
234
+
235
+ if openai_key:
236
+ gr.Markdown("✅ **OpenAI API**: Connected (gpt-4.1-mini)")
237
+ else:
238
+ gr.Markdown("⚠️ **OpenAI API**: Not configured (optional)")
239
+
240
+ if not vidraft_token:
241
+ gr.Markdown("""
242
+ **⚠️ Important**: Please add FRIENDLI_TOKEN to Space secrets for the app to work properly.
243
+
244
+ Go to: Space settings → Repository secrets → Add secret
245
+ """)
246
+ elif not openai_key:
247
+ gr.Markdown("""
248
+ **💡 Tip**: The app will work with VIDraft alone. Add OPENAI_API_KEY if you need OpenAI features.
249
+ """)
250
+ else:
251
+ gr.Markdown("**✨ All APIs configured! Your app is fully functional.**")
252
+
253
+ # Extract nodes
254
+ nodes = WORKFLOW_DATA.get("nodes", [])
255
+ input_nodes = [n for n in nodes if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
256
+ output_nodes = [n for n in nodes if n.get("type") in ["ChatOutput", "textOutput", "Output"]]
257
+
258
+ # Create inputs
259
+ inputs = []
260
+ if input_nodes:
261
+ gr.Markdown("### 📥 Inputs")
262
+ for node in input_nodes:
263
+ label = node.get("data", {}).get("label", node.get("id"))
264
+ template = node.get("data", {}).get("template", {})
265
+ default_value = template.get("input_value", {}).get("value", "")
266
+
267
+ if node.get("type") == "numberInput":
268
+ inp = gr.Number(label=label, value=float(default_value) if default_value else 0)
269
+ else:
270
+ inp = gr.Textbox(label=label, value=default_value, lines=2)
271
+ inputs.append(inp)
272
+
273
+ # Execute button
274
+ btn = gr.Button("🚀 Execute Workflow", variant="primary")
275
+
276
+ # Create outputs
277
+ outputs = []
278
+ if output_nodes:
279
+ gr.Markdown("### 📤 Outputs")
280
+ for node in output_nodes:
281
+ label = node.get("data", {}).get("label", node.get("id"))
282
+ out = gr.Textbox(label=label, interactive=False, lines=3)
283
+ outputs.append(out)
284
+
285
+ # Connect
286
+ btn.click(fn=execute_workflow, inputs=inputs, outputs=outputs)
287
+
288
+ gr.Markdown("---")
289
+ gr.Markdown("*Powered by MOUSE Workflow*")
290
+
291
+ if __name__ == "__main__":
292
+ demo.launch()