# (Hugging Face "Spaces: Sleeping" status lines removed — page-scrape artifact, not source code)
""" | |
My Workflow App | |
A workflow application created with MOUSE Workflow builder. | |
Generated by MOUSE Workflow | |
""" | |
import json
import os

import gradio as gr
import requests
# Workflow configuration: a small node/edge graph executed by execute_workflow().
# Nodes carry per-node settings under data.template; edges wire node outputs to inputs.
WORKFLOW_DATA = {
    "nodes": [
        {
            "id": "input_1",
            "type": "ChatInput",
            "position": {"x": 100, "y": 200},
            "data": {
                "label": "User Question",
                "template": {
                    "input_value": {"value": "What is the capital of Korea?"},
                },
            },
        },
        {
            "id": "llm_1",
            "type": "llmNode",
            "position": {"x": 400, "y": 200},
            "data": {
                "label": "AI Processing",
                "template": {
                    "provider": {"value": "OpenAI"},
                    "model": {"value": "gpt-4.1-mini"},
                    "temperature": {"value": 0.7},
                    "system_prompt": {"value": "You are a helpful assistant."},
                },
            },
        },
        {
            "id": "output_1",
            "type": "ChatOutput",
            "position": {"x": 700, "y": 200},
            "data": {"label": "Answer"},
        },
    ],
    # Linear pipeline: input_1 -> llm_1 -> output_1
    "edges": [
        {"id": "e1", "source": "input_1", "target": "llm_1"},
        {"id": "e2", "source": "llm_1", "target": "output_1"},
    ],
}
def execute_workflow(*input_values):
    """Execute the WORKFLOW_DATA graph with the given input values.

    Positional ``input_values`` are mapped, in order, onto the workflow's
    input nodes (ChatInput/textInput/Input/numberInput). Each node is then
    processed in declaration order, with edges feeding upstream results
    into downstream nodes.

    Returns:
        list[str]: one result per output node (ChatOutput/textOutput/Output),
        in declaration order; "" for outputs that received no value.
    """
    # API keys from environment (Space secrets).
    vidraft_token = os.getenv("FRIENDLI_TOKEN")
    openai_key = os.getenv("OPENAI_API_KEY")

    nodes = WORKFLOW_DATA.get("nodes", [])
    edges = WORKFLOW_DATA.get("edges", [])
    results = {}  # node id -> produced value

    def _incoming(node_id):
        """Yield (source_id, value) pairs feeding node_id, in edge order."""
        for edge in edges:
            if edge.get("target") == node_id:
                source_id = edge.get("source")
                if source_id in results:
                    yield source_id, results[source_id]

    # Map caller-supplied values onto input nodes positionally; extra
    # values (or extra nodes) are silently ignored, as before.
    input_nodes = [n for n in nodes
                   if n.get("type") in ("ChatInput", "textInput", "Input", "numberInput")]
    for node, value in zip(input_nodes, input_values):
        results[node["id"]] = value

    for node in nodes:
        node_id = node.get("id")
        node_type = node.get("type", "")
        template = node.get("data", {}).get("template", {})

        if node_type == "textNode":
            # Combine the node's own text with any connected upstream results.
            base_text = template.get("text", {}).get("value", "")
            connected = [f"{src}: {val}" for src, val in _incoming(node_id)]
            if connected:
                results[node_id] = f"{base_text}\n\nInputs:\n" + "\n".join(connected)
            else:
                results[node_id] = base_text

        elif node_type in ("llmNode", "OpenAIModel", "ChatModel"):
            provider = template.get("provider", {}).get("value", "OpenAI")
            # FIX: honor the model configured on the node (previously the
            # OpenAI call hard-coded "gpt-4.1-mini", ignoring this setting).
            model = template.get("model", {}).get("value", "gpt-4.1-mini")
            temperature = template.get("temperature", {}).get("value", 0.7)
            system_prompt = template.get("system_prompt", {}).get("value", "")

            # First connected upstream result becomes the user prompt.
            input_text = next((val for _, val in _incoming(node_id)), "")

            messages = []
            if system_prompt:
                messages.append({"role": "system", "content": system_prompt})
            messages.append({"role": "user", "content": input_text})

            if provider == "OpenAI" and openai_key:
                try:
                    from openai import OpenAI
                    client = OpenAI(api_key=openai_key)
                    response = client.chat.completions.create(
                        model=model,
                        messages=messages,
                        temperature=temperature,
                        max_tokens=1000,
                    )
                    results[node_id] = response.choices[0].message.content
                except Exception as e:
                    results[node_id] = f"[OpenAI Error: {str(e)}]"
            elif provider == "VIDraft" and vidraft_token:
                try:
                    headers = {
                        "Authorization": f"Bearer {vidraft_token}",
                        "Content-Type": "application/json",
                    }
                    payload = {
                        # Friendli dedicated-endpoint id for the VIDraft model.
                        "model": "dep89a2fld32mcm",
                        "messages": messages,
                        "max_tokens": 16384,
                        "temperature": temperature,
                        "top_p": 0.8,
                        "stream": False,
                    }
                    response = requests.post(
                        "https://api.friendli.ai/dedicated/v1/chat/completions",
                        headers=headers,
                        json=payload,
                        timeout=30,
                    )
                    if response.status_code == 200:
                        results[node_id] = response.json()["choices"][0]["message"]["content"]
                    else:
                        results[node_id] = f"[VIDraft Error: {response.status_code}]"
                except Exception as e:
                    results[node_id] = f"[VIDraft Error: {str(e)}]"
            else:
                # No usable credentials: report which key is missing, or
                # fall back to a simulated echo for unknown providers.
                if provider == "OpenAI":
                    results[node_id] = "[OpenAI API key not found. Please set OPENAI_API_KEY in Space secrets]"
                elif provider == "VIDraft":
                    results[node_id] = "[VIDraft API key not found. Please set FRIENDLI_TOKEN in Space secrets]"
                else:
                    results[node_id] = f"[Simulated Response: {input_text[:50]}...]"

        elif node_type in ("ChatOutput", "textOutput", "Output"):
            # Forward the first connected upstream result, if any.
            for _, val in _incoming(node_id):
                results[node_id] = val
                break

    output_nodes = [n for n in nodes
                    if n.get("type") in ("ChatOutput", "textOutput", "Output")]
    return [results.get(n["id"], "") for n in output_nodes]
# ---------------------------------------------------------------------------
# Gradio UI: one textbox/number per workflow input node, one textbox per
# output node, and a button that runs execute_workflow across them.
# ---------------------------------------------------------------------------
with gr.Blocks(title="My Workflow App", theme=gr.themes.Soft()) as demo:
    gr.Markdown("# My Workflow App")
    gr.Markdown("A workflow application created with MOUSE Workflow builder.")

    # Warn up front when neither provider credential is configured.
    has_vidraft = bool(os.getenv("FRIENDLI_TOKEN"))
    has_openai = bool(os.getenv("OPENAI_API_KEY"))
    if not (has_vidraft or has_openai):
        gr.Markdown(""" | |
β οΈ **API Keys Required** | |
Please set the following environment variables in Space settings β Secrets: | |
- `FRIENDLI_TOKEN` for VIDraft (Gemma-3-r1984-27B) | |
- `OPENAI_API_KEY` for OpenAI (gpt-4.1-mini) | |
""")

    all_nodes = WORKFLOW_DATA.get("nodes", [])
    entry_nodes = [n for n in all_nodes
                   if n.get("type") in ["ChatInput", "textInput", "Input", "numberInput"]]
    exit_nodes = [n for n in all_nodes
                  if n.get("type") in ["ChatOutput", "textOutput", "Output"]]

    # Build one input component per input node, preserving node order.
    inputs = []
    if entry_nodes:
        gr.Markdown("### π₯ Inputs")
        for entry in entry_nodes:
            data = entry.get("data", {})
            caption = data.get("label", entry.get("id"))
            preset = data.get("template", {}).get("input_value", {}).get("value", "")
            if entry.get("type") == "numberInput":
                widget = gr.Number(label=caption, value=float(preset) if preset else 0)
            else:
                widget = gr.Textbox(label=caption, value=preset, lines=2)
            inputs.append(widget)

    run_button = gr.Button("π Execute Workflow", variant="primary")

    # Build one read-only output component per output node.
    outputs = []
    if exit_nodes:
        gr.Markdown("### π€ Outputs")
        for exit_node in exit_nodes:
            caption = exit_node.get("data", {}).get("label", exit_node.get("id"))
            outputs.append(gr.Textbox(label=caption, interactive=False, lines=3))

    run_button.click(fn=execute_workflow, inputs=inputs, outputs=outputs)

    gr.Markdown("---")
    gr.Markdown("*Powered by MOUSE Workflow*")

if __name__ == "__main__":
    demo.launch()