import streamlit as st
import streamlit.components.v1 as components
from transformers import pipeline
from diffusers import StableDiffusionPipeline
from datasets import load_dataset
from peft import PeftConfig
from accelerate import Accelerator
from optimum.onnxruntime import ORTModelForSequenceClassification
import torch
import time

# Cache resource-intensive models
@st.cache_resource
def load_diffuser_model():
    # Load once and cache: half precision on GPU to save memory, full precision on CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    dtype = torch.float16 if device == "cuda" else torch.float32
    pipe = StableDiffusionPipeline.from_pretrained(
        "runwayml/stable-diffusion-v1-5", torch_dtype=dtype
    )
    return pipe.to(device)

# Sidebar navigation
st.sidebar.title("🤗 Libraries Demo")
st.sidebar.markdown("Explore text 📝, images 🖼️, and model ops 🚀 with Hugging Face and Arcee!")
page = st.sidebar.selectbox(
    "Choose a Section",
    [
        "🏠 Home",
        "🔄 Workflow",
        "📝 Transformers",
        "🖼️ Diffusers",
        "📊 Datasets",
        "⚙️ PEFT",
        "🚀 Accelerate",
        "⚡ Optimum",
        "📚 DistillKit",
        "🔀 MergeKit",
        "❄️ Spectrum"
    ],
    help="Select a library to explore!"
)

# Mermaid graph for DistillKit, MergeKit, and Spectrum workflows
mermaid_code = """
graph TD
    subgraph DistillKit
        A1[Load Teacher Model] --> B1[Load Student Model]
        B1 --> C1[Configure Distillation]
        C1 --> D1[Perform Distillation]
        D1 --> E1[Evaluate Model]
    end
    subgraph MergeKit
        A2[Select Models] --> B2[Choose Merge Method]
        B2 --> C2[Set Parameters]
        C2 --> D2[Merge Models]
        D2 --> E2[Save Merged Model]
    end
    subgraph Spectrum
        A3[Load Model] --> B3[Analyze Layers]
        B3 --> C3[Generate Config]
        C3 --> D3[Apply Freezing]
        D3 --> E3[Train/Evaluate Model]
    end
"""

# Home Page
if page == "🏠 Home":
    st.title("Hugging Face & Arcee Libraries Demo 🚀")
    st.markdown("""
Welcome to an interactive demo of powerful libraries for text, image, and model processing!
- **📝 Text**: Analyze or generate text with Transformers.
- **🖼️ Images**: Create visuals with Diffusers.
- **🚀 Models**: Distill, merge, and optimize with Arcee's DistillKit, MergeKit, and Spectrum.

Navigate via the sidebar to explore each library!
""")

# Workflow Page with Mermaid Graph
elif page == "🔄 Workflow":
    st.header("🔄 Workflows: DistillKit, MergeKit, Spectrum")
    st.markdown("See how inputs flow to outputs in Arcee's libraries with this Mermaid graph:")
    components.html(f"""
    <!-- Minimal Mermaid embed: the graph definition is injected below and rendered with the
         mermaid ESM build from the jsDelivr CDN (the pinned version is an assumption). -->
    <div class="mermaid">{mermaid_code}</div>
    <script type="module">
        import mermaid from "https://cdn.jsdelivr.net/npm/mermaid@10/dist/mermaid.esm.min.mjs";
        mermaid.initialize({{ startOnLoad: true }});
    </script>
    """, height=600)

# Transformers Section
elif page == "📝 Transformers":
    st.header("📝 Transformers")
    st.markdown("Process text with pre-trained models.")
    task = st.selectbox("Task", ["Sentiment Analysis", "Text Generation"])
    text = st.text_area("Input Text", "")
    if st.button("Run") and text:
        with st.spinner("Processing..."):
            if task == "Sentiment Analysis":
                result = pipeline("sentiment-analysis")(text)
                st.write(f"Result: {result[0]['label']} (Score: {result[0]['score']:.2f})")
            else:
                result = pipeline("text-generation")(text, max_length=50)[0]['generated_text']
                st.write(f"Generated: {result}")

# Diffusers Section
elif page == "🖼️ Diffusers":
    st.header("🖼️ Diffusers")
    st.markdown("Generate images from text.")
    prompt = st.text_input("Prompt", "A futuristic city")
    if st.button("Generate"):
        with st.spinner("Generating..."):
            pipe = load_diffuser_model()
            image = pipe(prompt).images[0]
            st.image(image, caption=prompt)

# Datasets Section
elif page == "📊 Datasets":
    st.header("📊 Datasets")
    st.markdown("Load and explore datasets.")
    dataset = st.selectbox("Dataset", ["imdb", "squad"])
    if st.button("Load"):
        data = load_dataset(dataset, split="train[:5]")
        st.write(data)

# PEFT Section
elif page == "⚙️ PEFT":
    st.header("⚙️ PEFT")
    st.markdown("Parameter-efficient fine-tuning.")
    text = st.text_area("Text", "")
    if st.button("Classify") and text:
        # Placeholder output: a full demo would load a PEFT adapter (e.g. via PeftConfig) and run inference.
        st.write("Simulated PEFT classification: Positive")

# Accelerate Section
elif page == "🚀 Accelerate":
    st.header("🚀 Accelerate")
    st.markdown("Optimize across devices.")
    text = st.text_area("Text", "")
    if st.button("Analyze") and text:
        # Accelerate selects the best available device; route the pipeline to it.
        accelerator = Accelerator()
        result = pipeline("sentiment-analysis", device=accelerator.device)(text)
        st.write(f"Result: {result[0]['label']} (Score: {result[0]['score']:.2f})")

# Optimum Section
elif page == "⚡ Optimum":
    st.header("⚡ Optimum")
    st.markdown("Hardware-accelerated inference.")
    text = st.text_area("Text", "")
    if st.button("Classify") and text:
        # Placeholder output: a full demo would export the model to ONNX with ORTModelForSequenceClassification.
        st.write("Simulated Optimum result: Positive")

# DistillKit Section
elif page == "📚 DistillKit":
    st.header("📚 DistillKit: Model Distillation")
    st.markdown("Distill large models into smaller, efficient ones. Here are the five core steps:")

    # 1. Load teacher model
    teacher = st.selectbox("Teacher Model", ["arcee-ai/Arcee-Spark", "bert-base-uncased"])
    st.write(f"1. Loaded teacher: {teacher}")

    # 2. Load student model
    student = st.selectbox("Student Model", ["Qwen/Qwen2-1.5B", "distilbert-base-uncased"])
    st.write(f"2. Loaded student: {student}")

    # 3. Configure distillation
    temp = st.slider("Temperature", 1.0, 5.0, 2.0)
    alpha = st.slider("Alpha", 0.0, 1.0, 0.5)
    st.write(f"3. Config: Temp={temp}, Alpha={alpha}")

    # 4. Perform distillation (simulated)
    if st.button("Distill"):
        with st.spinner("Distilling..."):
            time.sleep(2)
            st.success("4. Distillation complete!")

    # 5. Evaluate distilled model (simulated metrics)
    st.write("5. Evaluating...")
    metrics = {"accuracy": 0.85, "loss": 0.12}
    st.write(f"Metrics: {metrics}")

    st.markdown("""
**How It Works:** DistillKit compresses a teacher model into a student model using distillation techniques.

```python
config = {"teacher": "arcee-ai/Arcee-Spark", "student": "Qwen/Qwen2-1.5B", "temp": 2.0, "alpha": 0.5}