# streamlit_app.py
import os
import time
import requests
import streamlit as st
from datetime import datetime
from typing import Generator
from requests.exceptions import RequestException
import re
import json
from pydantic import BaseModel


class AnswerFormat(BaseModel):
    think: str
    answer: str
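
# The JSON schema generated from AnswerFormat is sent as the API's
# structured-output "response_format" in the request payload below, which
# asks the model to return a JSON object with "think" and "answer" fields.
# The parser below additionally tolerates a leading <think>...</think>
# block that reasoning models may prepend before the JSON body.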


def parse_and_render_streamed_response(streamed: str) -> None:
    """
    Parse the LLM response and render it with Streamlit.

    Args:
        streamed (str): The raw response content returned by the LLM.
    """
    # Check if <think> tags exist
    think_pattern = r'<think>(.*?)</think>'
    think_match = re.search(think_pattern, streamed, re.DOTALL)

    if think_match:
        # Extract reasoning content from <think> tags
        reasoning = think_match.group(1).strip()

        # Extract answer content (everything after </think>)
        answer_start = streamed.find('</think>') + len('</think>')
        answer = streamed[answer_start:].strip()
        # answer_json = json.loads(answer)

        # Render reasoning section
        if reasoning:
            st.subheader("🤔 Reasoning")
            st.markdown(reasoning)

        # Render answer section
        if answer:
            st.subheader("💡 Answer")
            st.markdown(answer)
    else:
        # No <think> tags found, render entire content as answer
        st.subheader("💡 Answer")
        st.markdown(streamed)
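
# Illustrative (hypothetical) example of the content this parser accepts:
#   '<think>Compared revenue across the filings...</think>'
#   '{"think": "...", "answer": "Revenue grew 12% year over year."}'
# The JSON body is currently rendered as raw markdown; re-enable the
# commented json.loads call above to work with the structured fields.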


# ----------------- Configuration -----------------
st.set_page_config(page_title="Perplexity Chat", layout="wide")

# ----------------- Sidebar Settings -----------------
st.sidebar.header("Settings")
default_date = datetime.strptime("2023-01-01", "%Y-%m-%d").date()
start_date = st.sidebar.date_input("Start Date", default_date)
# Format as m/d/yyyy for the API's search_after_date_filter.
# Note: %-m/%-d works on Linux/macOS; Windows strftime uses %#m/%#d instead.
search_after_date_filter = start_date.strftime("%-m/%-d/%Y")

# Load API key from environment variable (use .get so a missing key
# triggers the error message below instead of raising KeyError)
api_key = os.environ.get("PERPLEXITY_API_KEY")
if not api_key:
    st.error("Environment variable PERPLEXITY_API_KEY not found.")
    st.stop()

model = "sonar-deep-research"
search_mode = "sec"
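
# "sonar-deep-research" selects Perplexity's long-form research model; the
# "sec" value (per Perplexity's search_mode options) scopes retrieval to
# SEC filings, and search_after_date_filter drops sources published before
# the sidebar date.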

# ----------------- Session State -----------------
if "messages" not in st.session_state:
    st.session_state.messages = []

# ----------------- Chat UI -----------------
st.title("🔍 Perplexity AI Chat Interface")
st.caption("Ask questions and get responses powered by Perplexity AI.")

# Display chat history
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])


# ----------------- Streaming Response -----------------
def stream_response(response_text: str) -> Generator[str, None, None]:
    """Yield the response one word at a time for streaming effect."""
    for word in response_text.split():
        yield word + " "
        time.sleep(0.03)
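
# stream_response is not wired into the flow below. For a typewriter effect,
# Streamlit (>= 1.31) can consume the generator directly, e.g.
# st.write_stream(stream_response(answer_text)), where answer_text is a
# placeholder for whatever string you want to render.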


def call_perplexity_api(user_query: str) -> requests.Response:
    """Send the user query to the Perplexity API with retry logic."""
    url = "https://api.perplexity.ai/chat/completions"
    headers = {
        "accept": "application/json",
        "authorization": f"Bearer {api_key}",
        "content-type": "application/json",
    }
    payload = {
        "model": model,
        "messages": [{"role": "user", "content": user_query}],
        "stream": False,
        "search_mode": search_mode,
        "search_after_date_filter": search_after_date_filter,
        "response_format": {
            "type": "json_schema",
            "json_schema": {
                "schema": AnswerFormat.model_json_schema()
            }
        },
    }

    retries = 3
    for attempt in range(retries):
        try:
            response = requests.post(url, headers=headers, json=payload, timeout=120)
            response.raise_for_status()
            return response
        except RequestException as err:
            if attempt < retries - 1:
                wait_time = 2 ** attempt  # exponential backoff: 1s, 2s, 4s
                time.sleep(wait_time)
            else:
                raise err
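
# Example usage (illustrative query; mirrors the call in the chat handler below):
#   resp = call_perplexity_api("Summarize the key risk factors in Apple's latest 10-K")
#   content = resp.json()["choices"][0]["message"]["content"]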


# ----------------- Chat Input -----------------
user_input = st.chat_input("Enter your question here...")

if user_input:
    # Display user message
    with st.chat_message("user"):
        st.markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Display assistant response with spinner
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            try:
                full_response = call_perplexity_api(user_input).json()
                full_response_content = full_response["choices"][0]["message"]["content"]
                parse_and_render_streamed_response(full_response_content)
                # Store the rendered content string (not the raw API dict) so the
                # history replay above can pass it straight to st.markdown.
                st.session_state.messages.append(
                    {"role": "assistant", "content": full_response_content}
                )
            except RequestException as err:
                st.error(f"Error: {err}")
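
# To run locally (assuming this file is saved as streamlit_app.py):
#   export PERPLEXITY_API_KEY="your-key-here"
#   streamlit run streamlit_app.py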