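"""Gradio Space: generate a personalized learning path from an audio description.

The app transcribes the uploaded audio with a Whisper pipeline and then drafts a
learning path for the chosen difficulty level with a GPT-2 text-generation pipeline.
"""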
import gradio as gr
import torch
from transformers import pipeline
class LearningPathGenerator:
    def __init__(self):
        # Use the first GPU if available, otherwise fall back to CPU (-1).
        self.device = 0 if torch.cuda.is_available() else -1
        # Initialize models
        self.transcriber = pipeline("automatic-speech-recognition",
                                    model="openai/whisper-base",
                                    device=self.device)
        self.generator = pipeline("text-generation",
                                  model="gpt2",
                                  device=self.device)

    def process_audio(self,
                      audio_path: str,
                      difficulty: str = "intermediate") -> tuple:
        """Transcribe the uploaded audio and generate a learning path from it."""
        try:
            # Transcribe audio
            transcription = self.transcriber(audio_path)["text"]
            # Generate learning path
            prompt = f"""
            Based on the following text, create a detailed learning path
            for {difficulty} level:
            {transcription}
            Learning path:
            """
            analysis = self.generator(prompt,
                                      max_length=300,
                                      num_return_sequences=1)[0]["generated_text"]
            # The interface has two output components, so return two values.
            return transcription, analysis
        except Exception as e:
            return f"Error: {str(e)}", "Could not generate analysis."

def create_interface():
    app = gr.Interface(
        fn=LearningPathGenerator().process_audio,
        inputs=[
            gr.Audio(type="filepath", label="Upload Audio"),
            gr.Dropdown(
                choices=["beginner", "intermediate", "advanced"],
                value="intermediate",
                label="Difficulty Level"
            )
        ],
        outputs=[
            gr.Textbox(label="Audio Transcription"),
            gr.Textbox(label="Learning Path")
        ],
        title="Learning Path Generator",
        description="Upload an audio file describing your learning goals and receive a personalized learning path!"
    )
    return app

if __name__ == "__main__":
    app = create_interface()
    app.launch()