# gradio_generator.py - CPU & Spaces-compatible Gradio interface with HF Inference API for STT & Monetization
import os
import json
import requests
import gradio as gr
from deployer.simulator_interface import VirtualRobot
from deployer.revenue_tracker import get_revenue_stats, package_artifacts
from core_creator.voice_to_app import VoiceToAppCreator
# Hugging Face Inference API settings
HF_STT_URL = "https://api-inference.huggingface.co/models/openai/whisper-small"
HK_API_KEY = os.getenv("HK_API_KEY")
if not HK_API_KEY:
    raise EnvironmentError(
        "Please set the HK_API_KEY environment variable for HuggingFace Inference API."
    )
HEADERS = {"Authorization": f"Bearer {HK_API_KEY}"}
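# Usage note (illustrative): the token is read from the environment, e.g.
#   export HK_API_KEY="hf_..."   # a Hugging Face access token
# On Spaces, the same value would typically be stored as a repository secret named HK_API_KEY.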
# Core robot logic
def robot_behavior(user_input: str) -> str:
    """Map a text command to a VirtualRobot action and return the robot's response."""
    bot = VirtualRobot()
    text = user_input.strip().lower()
    if any(greet in text for greet in ["hello", "hi", "hey", "welcome"]):
        return bot.perform_action("wave") + "\n" + bot.perform_action("say Hello there!")
    if text.startswith("say "):
        return bot.perform_action(text)
    return bot.perform_action("say I'm sorry, I didn't understand that.")
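# Illustrative behavior of robot_behavior, following the branches above:
#   "hello"          -> wave action plus a spoken greeting
#   "say Good job!"  -> the robot speaks the given phrase verbatim
#   anything else    -> fallback "I didn't understand" reply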
# Transcribe via Hugging Face Inference API
def transcribe_audio(audio_file: str) -> str:
    """Send recorded audio to the HF Inference API (Whisper) and return the transcript."""
    with open(audio_file, "rb") as f:
        data = f.read()
    response = requests.post(HF_STT_URL, headers=HEADERS, data=data)
    if response.status_code != 200:
        return f"❌ Transcription error: {response.status_code}"
    result = response.json()
    # The Inference API returns {'text': '...'}
    return result.get("text", "")
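# Note (assumption, not part of the original flow): the hosted Whisper model can return
# HTTP 503 while it cold-starts. If that becomes an issue, one option is to ask the API
# to wait for the model, e.g.:
#   requests.post(HF_STT_URL, headers={**HEADERS, "x-wait-for-model": "true"}, data=data)
# at the cost of a slower first request.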
# Combined flow
def transcribe_and_respond(audio_file: str) -> str:
    """Transcribe a voice command, then route the text through robot_behavior."""
    text = transcribe_audio(audio_file)
    return robot_behavior(text)
# Package generation pipeline
def on_generate(idea: str):
    """Run the voice-to-app pipeline, package the artifacts, and return UI outputs."""
    creator = VoiceToAppCreator(idea)
    assets = creator.run_pipeline()
    zip_path = package_artifacts(assets)
    blueprint = assets.get("blueprint", {})
    code_files = assets.get("code", {})
    code_preview = "\n\n".join([f"# {fname}\n{content}" for fname, content in code_files.items()])
    return (
        "✅ App generated successfully!",
        zip_path,
        blueprint,
        code_preview,
    )
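# The tuple returned above maps positionally onto the Gradio outputs wired up in
# launch_gradio_app(): status message, downloadable zip, blueprint JSON, and code preview.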
# Build full-featured Gradio app
def launch_gradio_app(
    title: str = "RoboSage App",
    description: str = "Your robot, your voice, monetized."
) -> gr.Blocks:
    """Assemble the full-featured Gradio Blocks interface."""
    with gr.Blocks() as demo:
        gr.Markdown(f"# 🚀 {title}\n\n{description}")

        # 1. Generate & Download Artifacts
        with gr.Accordion("🎨 Generate App & Download Artifacts", open=True):
            idea = gr.Textbox(label="Robot Idea", placeholder="e.g. A friendly greeting robot.")
            gen_btn = gr.Button("Generate & Package", key="gen-app-btn")
            status = gr.Textbox(label="Generation Status", interactive=False)
            download_zip = gr.File(label="Download Artifacts (.zip)")
            blueprint_view = gr.JSON(label="App Blueprint")
            code_view = gr.Code(label="Generated Code Preview", language="python")
            gen_btn.click(
                fn=on_generate,
                inputs=[idea],
                outputs=[status, download_zip, blueprint_view, code_view]
            )

        # 2. Robot Simulation (Text & Voice)
        with gr.Accordion("🤖 Test Your Robot", open=False):
            text_input = gr.Textbox(label="Text Command", placeholder="Type 'hello' or 'say Good job!'")
            text_btn = gr.Button("Send Text", key="send-text-btn")
            text_output = gr.Textbox(label="Robot Response", lines=3, interactive=False)
            text_btn.click(fn=robot_behavior, inputs=[text_input], outputs=[text_output])

            gr.Markdown("---")
            audio_input = gr.Audio(source="microphone", type="filepath", label="Record Command")
            audio_btn = gr.Button("Send Audio", key="send-audio-btn")
            audio_output = gr.Textbox(label="Robot Response (via voice)", lines=3, interactive=False)
            audio_btn.click(fn=transcribe_and_respond, inputs=[audio_input], outputs=[audio_output])

        # 3. Monetization & Revenue Dashboard
        with gr.Accordion("💰 Monetization Dashboard", open=False):
            subscribe_btn = gr.Button("Subscribe to Pro Plan", key="subscribe-btn")
            subscribe_msg = gr.Textbox(label="Subscription Status", interactive=False)
            rev_btn = gr.Button("View Revenue Stats", key="rev-stats-btn")
            rev_table = gr.Dataframe(label="Revenue & Usage Metrics")
            subscribe_btn.click(fn=lambda: "✅ Subscribed!", inputs=None, outputs=[subscribe_msg])
            rev_btn.click(fn=get_revenue_stats, inputs=None, outputs=[rev_table])

    return demo
if __name__ == "__main__":
    app = launch_gradio_app()
    app.launch()
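    # Optional (assumption): for local testing outside Spaces, a temporary public URL
    # can be requested with app.launch(share=True) instead of the plain launch() above.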