# Module setup: imports, Streamlit page configuration, the two LLM prompt
# prefixes used throughout the app, and the roleplaying glossary data that
# drives the UI grids and buttons below.
import streamlit as st
import streamlit.components.v1 as components
import os
import json
import random
import base64
import glob
import math
import openai
import pytz
import re
import requests
import textract
import time
import zipfile
import huggingface_hub
import dotenv
from audio_recorder_streamlit import audio_recorder
from bs4 import BeautifulSoup
from collections import deque
from datetime import datetime
from dotenv import load_dotenv
from huggingface_hub import InferenceClient
from io import BytesIO
from openai import ChatCompletion
from PyPDF2 import PdfReader
from templates import bot_template, css, user_template
from xml.etree import ElementTree as ET
from PIL import Image
from urllib.parse import quote  # Ensure this import is included

# Set page configuration with a title and favicon
st.set_page_config(
    page_title="๐๐WordGameAI",
    page_icon="๐๐",
    layout="wide",
    initial_sidebar_state="expanded",
    menu_items={
        'Get Help': 'https://huggingface.co/awacke1',
        'Report a bug': "https://huggingface.co/spaces/awacke1/WebDataDownload",
        'About': "# Midjourney: https://discord.com/channels/@me/997514686608191558"
    }
)

# Prompt preface prepended to a topic to request a graphic-novel outline/table.
PromptPrefix = 'Create a markdown outline and table for a graphic novel with appropriate emojis for image slide theme names for plates and rules defining the method steps of plot lines begin to end (in ten steps or so) for topic of '
# Prompt preface prepended to a topic to request a full Streamlit app listing.
PromptPrefix2 = 'Create a streamlit python app with graphic novel features with art images and longer caption readings of mini stories, rhymes, songs and literary wit in form of graphic novels that are new and original. Show full code listing as streamlit app with procedural story telling based on functions variables and rules discussed in afformentioned method steps.. Create a UI implementing each feature using variables, reusable functions with parameters, and data driven application techniques using streamlit and/or gradio with python libraries and web components for Javascript and HTML5 including three.JS and Aframe if appropriate. Use appropriate emojis for all UI features and labels to focus on short list (1-3) lists of key parts, functions, relations, and conditions for topic: '

st.markdown('''### ๐โจ๐GraphicNovelAI ''')

# Glossary of story genres -> games/settings -> term lists. Categories map to
# dicts whose keys are game names and whose values are lists of theme terms.
roleplaying_glossary = {
    "๐งโโ๏ธ Fantasy Realms": {
        "Kingdom Under Siege": [
            "Dark sorcery", "Dragon invasions", "Epic battles",
            "Quest for legendary artifacts", "Prophecies and destinies",
            "Betrayal and redemption"
        ],
        "Wizard's Apprentice": [
            "Magical academy", "Arcane secrets", "Summoning mythical creatures",
            "Forbidden spells", "Rivalry among wizards", "Journey to save the realm",
            "Ancient prophecies", "Taming ancient beasts"
        ],
        "Elven Chronicles": [
            "Elven civilization", "Forest guardians", "War against darkness",
            "Alliance with dwarves", "Legendary weapons", "Lost realms",
            "Spiritual journeys", "Rebellion against tyranny"
        ],
    },
    "๐ฆธ Superhero Adventures": {
        "City of Shadows": [
            "Mysterious disappearances", "Supernatural occurrences",
            "Vigilante justice", "Secret identities", "Crime syndicates",
            "Morally ambiguous choices"
        ],
        "Heroic Origins": [
            "Unleashing newfound powers", "Discovering hidden lineage",
            "Training with mentors", "Encountering arch-nemeses",
            "Exploring alternate dimensions", "Team-up with other heroes",
            "Navigating public perception", "Balancing personal life with heroics"
        ],
        "Infinity Nexus": [
            "Cosmic threats", "Interdimensional warfare", "Time-bending adversaries",
            "Epic space battles", "Alliances with alien races",
            "Guardians of the galaxy", "Quests for ancient artifacts",
            "Existential dilemmas"
        ],
    },
    "๐ Sci-Fi Sagas": {
        "Galactic Conquest": [
            "Intergalactic empires", "Robotic uprisings", "Space exploration",
            "AI rebellion", "Cybernetic enhancements", "Temporal paradoxes"
        ],
        "Alien Encounters": [
            "First contact scenarios", "Xenomorph invasions",
            "Planetary colonization", "Rogue AI civilizations",
            "Intergalactic diplomacy", "Space piracy", "Mysterious anomalies",
            "Existential threats"
        ],
        "Cyberpunk Dystopia": [
            "Megacorporation dominance", "Neon-lit streets",
            "Cybernetic enhancements", "Underground resistance",
            "Virtual reality addiction", "Hackers and technomancers",
            "Corporate espionage", "Class warfare"
        ],
        "Post-Apocalyptic Odyssey": [
            "World devastated by cataclysmic event", "Survival in harsh environments",
            "Scavenging for resources", "Encounters with mutated creatures",
            "Struggle for power in a lawless society", "Journey to find sanctuary",
            "Rebuilding civilization", "Exploration of the remnants of the old world"
        ],
    },
    "๐ Comedy Capers": {
        "Misadventures of Mischief": [
            "Pranks gone wrong", "Absurd misunderstandings",
            "Comic relief sidekicks", "Hilarious hijinks",
            "Whimsical escapades", "Slapstick comedy"
        ],
        "Surreal Shenanigans": [
            "Reality-bending antics", "Fourth-wall breaks", "Meta-humor",
            "Ridiculous scenarios", "Quirky characters", "Parodies of pop culture",
            "Satirical commentary", "Unexpected plot twists"
        ],
    },
    "๐ญ Dramatic Epics": {
        "Tragic Heroes": [
            "Heart-wrenching sacrifices", "Impossible choices",
            "Inevitable betrayals", "Redemption arcs", "Tortured souls",
            "Unrequited love"
        ],
        "Family Feuds": [
            "Generational conflicts", "Secret family legacies",
            "Sibling rivalries", "Parental expectations", "Forbidden romances",
            "Legacy of past mistakes", "Reconciliation journeys",
            "Personal growth through adversity"
        ],
    },
}
# 9.
# Sidebar with UI controls to review and re-run prompts and continue responses

@st.cache_resource
def get_table_download_link(file_path):
    """Return an HTML anchor that downloads *file_path* as a base64 data URI.

    The MIME type is derived from the file extension; unknown extensions fall
    back to application/octet-stream.
    """
    with open(file_path, 'r') as file:
        data = file.read()
    b64 = base64.b64encode(data.encode()).decode()
    file_name = os.path.basename(file_path)
    ext = os.path.splitext(file_name)[1]  # get the file extension
    mime_types = {
        '.txt': 'text/plain',
        '.py': 'text/plain',
        '.xlsx': 'text/plain',
        '.csv': 'text/plain',
        '.htm': 'text/html',
        '.md': 'text/markdown',
        '.wav': 'audio/wav',
    }
    mime_type = mime_types.get(ext, 'application/octet-stream')  # general binary data type
    # FIX: the original href contained only the file name, so the computed
    # b64 payload and mime_type were never used and the link downloaded nothing.
    href = f'<a href="data:{mime_type};base64,{b64}" target="_blank" download="{file_name}">{file_name}</a>'
    return href


def FileSidebar():
    """Render the markdown-file sidebar: per-file view/open/search/delete
    buttons plus bulk delete/download, and run Llama/GPT over a chosen file."""
    # ----------------------- File Sidebar for Jump Gates -----------------------
    # Compose a file sidebar of markdown md files:
    all_files = glob.glob("*.md")
    all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
    all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type then name, descending

    if st.sidebar.button("๐ Delete All Text"):
        for file in all_files:
            os.remove(file)
        st.experimental_rerun()

    if st.sidebar.button("โฌ๏ธ Download All"):
        # create_zip_of_files / get_zip_download_link are defined elsewhere in this app.
        zip_file = create_zip_of_files(all_files)
        st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)

    file_contents = ''
    next_action = ''
    for file in all_files:
        col1, col2, col3, col4, col5 = st.sidebar.columns([1, 6, 1, 1, 1])  # adjust the ratio as needed
        with col1:
            if st.button("๐", key="md_" + file):  # md emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'md'
        with col2:
            st.markdown(get_table_download_link(file), unsafe_allow_html=True)
        with col3:
            if st.button("๐", key="open_" + file):  # open emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'open'
        with col4:
            if st.button("๐", key="read_" + file):  # search emoji button
                with open(file, 'r') as f:
                    file_contents = f.read()
                next_action = 'search'
        with col5:
            if st.button("๐", key="delete_" + file):
                os.remove(file)
                st.experimental_rerun()

    if len(file_contents) > 0:
        if next_action == 'open':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
        if next_action == 'md':
            st.markdown(file_contents)
            buttonlabel = '๐Run with Llama and GPT.'
            if st.button(key='RunWithLlamaandGPT', label=buttonlabel):
                user_prompt = file_contents
                # Llama versus GPT Battle!
                combined = ""
                try:
                    response = StreamLLMChatResponse(file_contents)
                    filename = generate_filename(user_prompt, "md")
                    create_file(filename, file_contents, response, should_save)
                    combined = response
                except Exception:  # narrowed from bare except; surface a friendly retry hint
                    st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
                # gpt
                try:
                    response2 = chat_with_model(user_prompt, file_contents, model_choice)
                    # NOTE(review): `choice` is defined elsewhere in this app — verify.
                    filename2 = generate_filename(file_contents, choice)
                    # FIX: previously saved `response` (the Llama answer) under the
                    # GPT filename; save the GPT answer `response2` instead.
                    create_file(filename2, user_prompt, response2, should_save)
                    combined = combined + response2
                except Exception:
                    st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
                SpeechSynthesis(combined)
        if next_action == 'search':
            file_content_area = st.text_area("File Contents:", file_contents, height=500)
            user_prompt = file_contents
            # Llama versus GPT Battle!
            combined = ""
            try:
                st.write('๐Running with Llama.')
                response = StreamLLMChatResponse(file_contents)
                # FIX: this branch passed ".md" while the 'md' branch passed "md";
                # use the bare extension consistently.
                filename = generate_filename(user_prompt, "md")
                create_file(filename, file_contents, response, should_save)
                combined = response
            except Exception:
                st.markdown('Llama is sleeping. Restart ETA 30 seconds.')
            # gpt
            try:
                response2 = chat_with_model(user_prompt, file_contents, model_choice)
                filename2 = generate_filename(file_contents, choice)
                # FIX: save the GPT answer (was `response`).
                create_file(filename2, user_prompt, response2, should_save)
                combined = combined + response2
            except Exception:
                st.markdown('GPT is sleeping. Restart ETA 30 seconds.')
            SpeechSynthesis(combined)
# ----------------------- File Sidebar for Jump Gates -----------------------


FileSidebar()


# ---- Art Card Sidebar with Random Selection of image:
def get_image_as_base64(url):
    """Fetch *url* and return the body base64-encoded, or None on a non-200 response."""
    response = requests.get(url)
    if response.status_code == 200:
        # Convert the image to base64
        return base64.b64encode(response.content).decode("utf-8")
    else:
        return None


def create_download_link(filename, base64_str):
    """Return an HTML anchor that downloads the given base64-encoded PNG as *filename*."""
    # FIX: the anchor markup was missing, so the base64 payload was unused and
    # the "link" rendered as plain text.
    href = f'<a href="data:file/png;base64,{base64_str}" download="{filename}">Download Image</a>'
    return href


# List of image URLs
image_urls = [
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/wWLDCOgbmNI_PKWFKx15H.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/qG8n3EJKLflRnzqNAhD7g.png",
    "https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/S5Fd7LH_mBpD1Vtgypgh1.png",
    #"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/gikaT871Mm8k6wuv4pl_g.png",
    #"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2YsnDyc_nDNW71PPKozdN.png",
    #"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/G_GkRD_IT3f14K7gWlbwi.png",
    #"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/eGii5DvGIuCtWCU08_i-D.png",
    #"https://cdn-uploads.huggingface.co/production/uploads/620630b603825909dcbeba35/2-KfxcuXRcTFiHf4XlNsX.png"
]

# Select a random URL from the list
selected_image_url = random.choice(image_urls)

# Get the base64 encoded string of the selected image
selected_image_base64 = get_image_as_base64(selected_image_url)
if selected_image_base64 is not None:
    with st.sidebar:
        st.markdown("""### Graphic Novel AI""")
        # Display the image.
        # NOTE(review): the original markup was stripped during extraction
        # (st.markdown(f"")); reconstructed as an inline data-URI image —
        # confirm against the deployed app.
        st.markdown(
            f'<img src="data:image/png;base64,{selected_image_base64}" width="300" />',
            unsafe_allow_html=True,
        )
        # Create and display the download link
        download_link = create_download_link("downloaded_image.png", selected_image_base64)
        st.markdown(download_link, unsafe_allow_html=True)
else:
    st.sidebar.write("Failed to load the image.")
# ---- Art Card Sidebar with random selection of image.

# Ensure the directory for storing scores exists
score_dir = "scores"
os.makedirs(score_dir, exist_ok=True)


def generate_key(label, header, idx):
    """Generate a unique widget key for a button, including an emoji label."""
    return f"{header}_{label}_{idx}_key"


def update_score(key, increment=1):
    """Increment the click count and score persisted for *key*; return the new score."""
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
    else:
        score_data = {"clicks": 0, "score": 0}
    score_data["clicks"] += 1
    score_data["score"] += increment
    with open(score_file, "w") as file:
        json.dump(score_data, file)
    return score_data["score"]


def load_score(key):
    """Load the saved score for *key*, defaulting to 0 when no score file exists."""
    score_file = os.path.join(score_dir, f"{key}.json")
    if os.path.exists(score_file):
        with open(score_file, "r") as file:
            score_data = json.load(file)
        return score_data["score"]
    return 0


def search_glossary(query):
    """Echo glossary matches for *query*, then run the two-stage LLM pipeline
    (outline prompt, then coding prompt) and return the concatenated result."""
    # NOTE(review): `terms` is the per-category dict, so this generator compares
    # the query against game names (dict keys), not the individual term lists —
    # confirm that is the intended match.
    for category, terms in roleplaying_glossary.items():
        if query.lower() in (term.lower() for term in terms):
            st.markdown(f"#### {category}")
            st.write(f"- {query}")
    combined = ""
    query2 = PromptPrefix + query  # Add prompt preface for method step task behavior
    response = chat_with_model(query2)
    filename = generate_filename(query2 + ' --- ' + response, "md")
    create_file(filename, query, response, should_save)
    SpeechSynthesis(response)
    # Add prompt preface for coding task behavior
    query3 = PromptPrefix2 + query + ' creating streamlit functions that implement outline of method steps below: ' + response
    response2 = chat_with_model(query3)
    filename_txt = generate_filename(query + ' --- ' + response2, "py")
    create_file(filename_txt, query, response2, should_save)
    combined = '# Query: ' + query + '# Response: ' + response + '# Response2: ' + response2
    filename_txt2 = generate_filename(query + ' --- ' + combined, "md")
    create_file(filename_txt2, query, combined, should_save)
    SpeechSynthesis(combined)
    return combined


# Function to display the glossary in a structured format
def display_glossary(glossary, area):
    """Render the games and numbered term lists for one glossary *area*."""
    if area in glossary:
        st.subheader(f"๐ Glossary for {area}")
        for game, terms in glossary[area].items():
            st.markdown(f"### {game}")
            for idx, term in enumerate(terms, start=1):
                st.write(f"{idx}. {term}")


# Function to display the entire glossary in a grid format with links
def display_glossary_grid(roleplaying_glossary):
    """Render every glossary category as a column grid; each term carries
    emoji search links (Wikipedia / Google / YouTube / Bing / this app)."""
    search_urls = {
        "๐": lambda k: f"https://en.wikipedia.org/wiki/{quote(k)}",
        "๐": lambda k: f"https://www.google.com/search?q={quote(k)}",
        "โถ๏ธ": lambda k: f"https://www.youtube.com/results?search_query={quote(k)}",
        "๐": lambda k: f"https://www.bing.com/search?q={quote(k)}",
        "๐ฒ": lambda k: f"https://huggingface.co/spaces/awacke1/GraphicNovelAI?q={quote(k)}",  # this url plus query!
    }
    for category, details in roleplaying_glossary.items():
        st.write(f"### {category}")
        cols = st.columns(len(details))  # Create dynamic columns based on the number of games
        for idx, (game, terms) in enumerate(details.items()):
            with cols[idx]:
                st.markdown(f"#### {game}")
                for term in terms:
                    links_md = ' '.join([f"[{emoji}]({url(term)})" for emoji, url in search_urls.items()])
                    st.markdown(f"{term} {links_md}", unsafe_allow_html=True)


# Per-game and per-topic emoji lookups used by display_buttons_with_scores.
game_emojis = {
    "Dungeons and Dragons": "๐",
    "Call of Cthulhu": "๐",
    "GURPS": "๐ฒ",
    "Pathfinder": "๐บ๏ธ",
    "Kindred of the East": "๐ ",
    "Changeling": "๐",
}

topic_emojis = {
    "Core Rulebooks": "๐",
    "Maps & Settings": "๐บ๏ธ",
    "Game Mechanics & Tools": "โ๏ธ",
    "Monsters & Adversaries": "๐น",
    "Campaigns & Adventures": "๐",
    "Creatives & Assets": "๐จ",
    "Game Master Resources": "๐ ๏ธ",
    "Lore & Background": "๐",
    "Character Development": "๐ง",
    "Homebrew Content": "๐ง",
    "General Topics": "๐",
}


# Adjusted display_buttons_with_scores function
def display_buttons_with_scores():
    """Render a scored button per glossary term; clicking bumps the persisted
    score and kicks off the search_glossary LLM pipeline for that term."""
    for category, games in roleplaying_glossary.items():
        category_emoji = topic_emojis.get(category, "๐")  # Default to search icon if no match
        st.markdown(f"## {category_emoji} {category}")
        for game, terms in games.items():
            game_emoji = game_emojis.get(game, "๐ฎ")  # Default to generic game controller if no match
            for term in terms:
                key = f"{category}_{game}_{term}".replace(' ', '_').lower()
                score = load_score(key)
                if st.button(f"{game_emoji} {term} {score}", key=key):
                    update_score(key)
                    # Create a dynamic query incorporating emojis and formatting for clarity
                    query_prefix = f"{category_emoji} {game_emoji} **{game} - {category}:**"
                    #query_body = f"Create a detailed outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
                    query_body = f"Create a streamlit python app.py that produces a detailed markdown outline and emoji laden user interface with labels with the entity name and emojis in all labels with a set of streamlit UI components with drop down lists and dataframes and buttons with expander and sidebar for the app to run the data as default values mostly in text boxes. Feature a 3 point outline sith 3 subpoints each where each line has about six words describing this and also contain appropriate emoji for creating sumamry of all aspeccts of this topic. an outline for **{term}** with subpoints highlighting key aspects, using emojis for visual engagement. Include step-by-step rules and boldface important entities and ruleset elements."
                    response = search_glossary(query_prefix + query_body)


def fetch_wikipedia_summary(keyword):
    """Placeholder for fetching a Wikipedia summary for *keyword*.

    A real implementation would query the Wikipedia API via requests.
    """
    return f"Summary for {keyword}. For more information, visit Wikipedia."
def create_search_url_youtube(keyword): base_url = "https://www.youtube.com/results?search_query=" return base_url + keyword.replace(' ', '+') def create_search_url_bing(keyword): base_url = "https://www.bing.com/search?q=" return base_url + keyword.replace(' ', '+') def create_search_url_wikipedia(keyword): base_url = "https://www.wikipedia.org/search-redirect.php?family=wikipedia&language=en&search=" return base_url + keyword.replace(' ', '+') def create_search_url_google(keyword): base_url = "https://www.google.com/search?q=" return base_url + keyword.replace(' ', '+') def create_search_url_ai(keyword): base_url = "https://huggingface.co/spaces/awacke1/GraphicNovelAI?q=" return base_url + keyword.replace(' ', '+') def display_images_and_wikipedia_summaries(): image_files = [f for f in os.listdir('.') if f.endswith('.png')] if not image_files: st.write("No PNG images found in the current directory.") return for image_file in image_files: image = Image.open(image_file) st.image(image, caption=image_file, use_column_width=True) keyword = image_file.split('.')[0] # Assumes keyword is the file name without extension # Display Wikipedia and Google search links wikipedia_url = create_search_url_wikipedia(keyword) google_url = create_search_url_google(keyword) youtube_url = create_search_url_youtube(keyword) bing_url = create_search_url_bing(keyword) ai_url = create_search_url_ai(keyword) links_md = f""" [Wikipedia]({wikipedia_url}) | [Google]({google_url}) | [YouTube]({youtube_url}) | [Bing]({bing_url}) | [AI]({ai_url}) """ st.markdown(links_md) def get_all_query_params(key): return st.query_params().get(key, []) def clear_query_params(): st.query_params() # Function to display content or image based on a query def display_content_or_image(query): # Check if the query matches any glossary term for category, terms in transhuman_glossary.items(): for term in terms: if query.lower() in term.lower(): st.subheader(f"Found in {category}:") st.write(term) return True # Return 
after finding and displaying the first match # Check for an image match in a predefined directory (adjust path as needed) image_dir = "images" # Example directory where images are stored image_path = f"{image_dir}/{query}.png" # Construct image path with query if os.path.exists(image_path): st.image(image_path, caption=f"Image for {query}") return True # If no content or image is found st.warning("No matching content or image found.") return False # ------------------------------------ def add_Med_Licensing_Exam_Dataset(): import streamlit as st from datasets import load_dataset dataset = load_dataset("augtoma/usmle_step_1")['test'] # Using 'test' split st.title("USMLE Step 1 Dataset Viewer") if len(dataset) == 0: st.write("๐ข The dataset is empty.") else: st.write(""" ๐ Use the search box to filter questions or use the grid to scroll through the dataset. """) # ๐ฉโ๐ฌ Search Box search_term = st.text_input("Search for a specific question:", "") # ๐ Pagination records_per_page = 100 num_records = len(dataset) num_pages = max(int(num_records / records_per_page), 1) # Skip generating the slider if num_pages is 1 (i.e., all records fit in one page) if num_pages > 1: page_number = st.select_slider("Select page:", options=list(range(1, num_pages + 1))) else: page_number = 1 # Only one page # ๐ Display Data start_idx = (page_number - 1) * records_per_page end_idx = start_idx + records_per_page # ๐งช Apply the Search Filter filtered_data = [] for record in dataset[start_idx:end_idx]: if isinstance(record, dict) and 'text' in record and 'id' in record: if search_term: if search_term.lower() in record['text'].lower(): st.markdown(record) filtered_data.append(record) else: filtered_data.append(record) # ๐ Render the Grid for record in filtered_data: st.write(f"## Question ID: {record['id']}") st.write(f"### Question:") st.write(f"{record['text']}") st.write(f"### Answer:") st.write(f"{record['answer']}") st.write("---") st.write(f"๐ Total Records: {num_records} | ๐ Displaying 
{start_idx+1} to {min(end_idx, num_records)}") # 1. Constants and Top Level UI Variables # My Inference API Copy API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama # Meta's Original - Chat HF Free Version: #API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-2-7b-chat-hf" API_KEY = os.getenv('API_KEY') MODEL1="meta-llama/Llama-2-7b-chat-hf" MODEL1URL="https://huggingface.co/meta-llama/Llama-2-7b-chat-hf" HF_KEY = os.getenv('HF_KEY') headers = { "Authorization": f"Bearer {HF_KEY}", "Content-Type": "application/json" } key = os.getenv('OPENAI_API_KEY') prompt = f"Write instructions to teach discharge planning along with guidelines and patient education. List entities, features and relationships to CCDA and FHIR objects in boldface." should_save = st.sidebar.checkbox("๐พ Save", value=True, help="Save your session data.") # 2. Prompt label button demo for LLM def add_witty_humor_buttons(): with st.expander("Wit and Humor ๐คฃ", expanded=True): # Tip about the Dromedary family st.markdown("๐ฌ **Fun Fact**: Dromedaries, part of the camel family, have a single hump and are adapted to arid environments. 
Their 'superpowers' include the ability to survive without water for up to 7 days, thanks to their specialized blood cells and water storage in their hump.") # Define button descriptions descriptions = { "Generate Limericks ๐": "Write ten random adult limericks based on quotes that are tweet length and make you laugh ๐ญ", "Wise Quotes ๐ง": "Generate ten wise quotes that are tweet length ๐ฆ", "Funny Rhymes ๐ค": "Create ten funny rhymes that are tweet length ๐ถ", "Medical Jokes ๐": "Create ten medical jokes that are tweet length ๐ฅ", "Minnesota Humor โ๏ธ": "Create ten jokes about Minnesota that are tweet length ๐จ๏ธ", "Top Funny Stories ๐": "Create ten funny stories that are tweet length ๐", "More Funny Rhymes ๐๏ธ": "Create ten more funny rhymes that are tweet length ๐ต" } # Create columns col1, col2, col3 = st.columns([1, 1, 1], gap="small") # Add buttons to columns if col1.button("Wise Limericks ๐"): StreamLLMChatResponse(descriptions["Generate Limericks ๐"]) if col2.button("Wise Quotes ๐ง"): StreamLLMChatResponse(descriptions["Wise Quotes ๐ง"]) #if col3.button("Funny Rhymes ๐ค"): # StreamLLMChatResponse(descriptions["Funny Rhymes ๐ค"]) col4, col5, col6 = st.columns([1, 1, 1], gap="small") if col4.button("Top Ten Funniest Clean Jokes ๐"): StreamLLMChatResponse(descriptions["Top Ten Funniest Clean Jokes ๐"]) if col5.button("Minnesota Humor โ๏ธ"): StreamLLMChatResponse(descriptions["Minnesota Humor โ๏ธ"]) if col6.button("Origins of Medical Science True Stories"): StreamLLMChatResponse(descriptions["Origins of Medical Science True Stories"]) col7 = st.columns(1, gap="small") if col7[0].button("Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"): StreamLLMChatResponse(descriptions["Top Ten Best Write a streamlit python program prompts to build AI programs. ๐๏ธ"]) def SpeechSynthesis(result): documentHTML5='''