import gradio as gr
import pandas as pd
from transformers import pipeline, AutoModelForCausalLM, AutoTokenizer
# --- One-time model setup ---------------------------------------------------
# Initialize the Hugging Face pipeline
# NOTE: from_pretrained fetches/caches the weights, so first startup does a
# network download — presumably acceptable for this demo; verify for deploys.
model_name = "gpt2" # Using GPT-2 to ensure compatibility
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
# Shared text-generation pipeline; reused by every request handled below.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
def generate_solutions(query, text_generator=None):
    """Generate candidate solutions for a machine-problem description.

    Parameters
    ----------
    query : str
        Free-text description of the machine problem, used as the LM prompt.
    text_generator : callable, optional
        Generation function with the transformers text-generation pipeline
        call signature. Defaults to the module-level ``generator`` pipeline;
        injectable for testing.

    Returns
    -------
    str
        An HTML table with one "Solution" column, one row per generated text.
    """
    gen = generator if text_generator is None else text_generator
    # do_sample=True is required: GPT-2's default greedy decoding rejects
    # num_return_sequences > 1 (transformers raises a ValueError).
    response = gen(query, max_length=100, num_return_sequences=3, do_sample=True)
    # Extract the generated texts into one row per candidate solution.
    solutions = [{"Solution": r["generated_text"]} for r in response]
    df = pd.DataFrame(solutions)
    # escape=True: model output is untrusted text; escaping prevents it from
    # injecting markup/scripts into the rendered HTML page.
    return df.to_html(escape=True, index=False)
# Wire the generator into a simple Gradio web UI: a free-text problem
# description goes in, an HTML table of suggested solutions comes out.
_interface_config = {
    "fn": generate_solutions,
    "inputs": gr.Textbox(lines=2, placeholder="Describe the problem with the machine..."),
    "outputs": gr.HTML(),
    "title": "Oroz: Your Industry Maintenance Assistant",
    "description": "Describe the problem with your machine, and get an organized table of suggested solutions.",
}
iface = gr.Interface(**_interface_config)
# share=True additionally publishes a temporary public URL beyond localhost.
iface.launch(share=True)