# Snapshot of a Hugging Face Space app (commit a98d89c, 9,461 bytes).
import streamlit as st
import json
# Hugging Face Space URLs rendered as launcher buttons below.
# The last path segment of each URL (after the final '/') becomes the
# button label via `url_names`.
urls = [
"https://huggingface.co/spaces/awacke1/CB-GR-Chatbot-Blenderbot",
"https://huggingface.co/spaces/awacke1/TTS-STT-Blocks",
"https://huggingface.co/spaces/awacke1/Prompt-Refinery-Text-to-Image-Generation",
"https://huggingface.co/spaces/awacke1/Video-Summary",
"https://huggingface.co/spaces/awacke1/AI-MovieMaker-Comedy",
"https://huggingface.co/spaces/awacke1/ChatGPT-Memory-Chat-Story-Generator",
"https://huggingface.co/spaces/awacke1/CloneAnyVoice",
"https://huggingface.co/spaces/awacke1/ChatGPT-Streamlit-2",
"https://huggingface.co/spaces/awacke1/WikipediaUltimateAISearch",
"https://huggingface.co/spaces/awacke1/RLHF.Cognitive.Episodic.Semantic.Memory",
"https://huggingface.co/spaces/awacke1/Memory-Shared",
"https://huggingface.co/spaces/awacke1/VideoSwap",
"https://huggingface.co/spaces/awacke1/AI-Wikipedia-Search",
"https://huggingface.co/spaces/awacke1/AutoMLUsingStreamlit-Plotly",
"https://huggingface.co/spaces/awacke1/NLP-Lyric-Chorus-Image",
"https://huggingface.co/spaces/awacke1/OpenAssistant-Chatbot-FTW-Open-Source",
"https://huggingface.co/spaces/awacke1/ChatGPTStreamlit7",
"https://huggingface.co/spaces/awacke1/MultiPDF-QA-ChatGPT-Langchain",
"https://huggingface.co/spaces/awacke1/SOTA-Plan",
"https://huggingface.co/spaces/awacke1/AIandSmartTools",
"https://huggingface.co/spaces/awacke1/3DVirtualFood",
"https://huggingface.co/spaces/awacke1/Gradio-Gallery-Health-Medical-Icon-Sets",
"https://huggingface.co/spaces/awacke1/DatasetAnalyzer",
"https://huggingface.co/spaces/awacke1/PrompTart",
"https://huggingface.co/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli",
"https://huggingface.co/spaces/awacke1/File-Memory-Operations-Human-Feedback-Gradio",
"https://huggingface.co/spaces/awacke1/Bloom.Big.Science.Continual.Generator",
"https://huggingface.co/spaces/awacke1/Ontology-Gradio",
"https://huggingface.co/spaces/awacke1/HTML5-Aframe-3dMap-Flight",
"https://huggingface.co/spaces/awacke1/Bloom.Generative.Writer",
"https://huggingface.co/spaces/awacke1/Voice-ChatGPT-Streamlit-12",
"https://huggingface.co/spaces/awacke1/HTML5-AR-VR",
"https://huggingface.co/spaces/awacke1/AnimationAI",
"https://huggingface.co/spaces/awacke1/GenerativeWordsandImages",
"https://huggingface.co/spaces/awacke1/AR-VR-IOT-Demo",
"https://huggingface.co/spaces/awacke1/ArtStyleFoodsandNutrition",
"https://huggingface.co/spaces/awacke1/CarePlanQnAWithContext",
"https://huggingface.co/spaces/awacke1/VideoSummaryYoutube3",
"https://huggingface.co/spaces/awacke1/AW-01ST-CSV-Dataset-Analyzer",
"https://huggingface.co/spaces/awacke1/Try.Playing.Learning.Sharing.On.This",
"https://huggingface.co/spaces/awacke1/google-flan-t5-base",
"https://huggingface.co/spaces/awacke1/PubMed-Parrot-Paraphraser-on-T5",
"https://huggingface.co/spaces/awacke1/Writing-Grammar-And-Paraphrase-w-Pegasus",
"https://huggingface.co/spaces/awacke1/runwayml-stable-diffusion-v1-5",
"https://huggingface.co/spaces/awacke1/DockerGoFlanT5",
"https://huggingface.co/spaces/awacke1/GradioContinualGenerator",
"https://huggingface.co/spaces/awacke1/StreamlitSuperPowerCheatSheet"
]
# Button labels: the final path component of each space URL.
url_names = [entry.rsplit('/', 1)[-1] for entry in urls]
# Associate each URL with a relevant emoji based on keywords in its name.
# First key (in insertion order) found as a substring of a URL name wins.
# NOTE(review): the emoji values appear mojibake-mangled (UTF-8 bytes decoded
# with the wrong codec) — confirm against the original source before "fixing";
# they are kept byte-identical here.
emoji_mapping = {
"Chatbot": "π€",
"TTS": "π£οΈ",
"STT": "π",
"Video": "π₯",
"MovieMaker": "πΏ",
"ChatGPT": "π¬",
"Voice": "ποΈ",
"Wikipedia": "π",
"Memory": "π§ ",
"AI": "π§ ",
"OpenAssistant": "π€",
"3D": "πΆοΈ",
"AR": "π",
"VR": "πΆοΈ",
"Animation": "ποΈ",
"Dataset": "π",
"Gradio": "π»",
"HTML5": "π",
"Writing": "βοΈ",
"Grammar": "ποΈ",
"Paraphrase": "π",
"Streamlit": "π "
}
# Resolve one emoji per URL name: the first mapping key (in dict order)
# that appears as a substring wins; "π" is the fallback for no match.
url_emojis = [
    next((icon for key, icon in emoji_mapping.items() if key in name), "π")
    for name in url_names
]
import streamlit as st
import json
import webbrowser
# Function to load the history of clicks from the text file
def load_history():
    """Load per-URL click counts from click_history.txt.

    Returns:
        dict: every URL in ``urls`` mapped to its click count. Saved
        counts are overlaid on zero defaults, so URLs added to the list
        after the file was written still get an entry (the original
        returned the saved dict as-is, which made ``history[url]``
        raise KeyError for new URLs). Missing or unparseable files
        yield all-zero counts instead of crashing the app.
    """
    counts = {url: 0 for url in urls}
    try:
        with open("click_history.txt", "r") as f:
            counts.update(json.load(f))
    except (FileNotFoundError, json.JSONDecodeError):
        pass
    return counts
# Persist the click history so counts survive app restarts.
def save_history(history):
    """Write the {url: click_count} mapping to click_history.txt as JSON."""
    with open("click_history.txt", "w") as out:
        out.write(json.dumps(history))
# Load the persisted per-URL click counts.
# NOTE(review): this module-level UI duplicates what main() renders later;
# consider removing one of the two.
history = load_history()
# Render one launcher button per URL with its click counter underneath.
for url, name, emoji in zip(urls, url_names, url_emojis):
    if st.button(f"{emoji} {name}"):
        # NOTE(review): Streamlit sanitizes <script> tags, so this JS
        # tab-open most likely never executes in the browser — confirm;
        # st.link_button / components.html is the supported route.
        # st.markdown is the documented API for unsafe_allow_html.
        st.markdown('<script>window.open("'+url+'", "_blank");</script>', unsafe_allow_html=True)
        # .get() guards against URLs missing from an older saved file.
        history[url] = history.get(url, 0) + 1
        save_history(history)
    # Show the click count below its button.
    st.write(f"Clicked: {history.get(url, 0)} times")
import time
from bokeh.plotting import figure
from bokeh.models import ColumnDataSource
# ... [rest of the initial code remains unchanged] ...
# Streamlit app
def main():
    """Render the flat launcher-button UI (legacy version).

    NOTE(review): this definition is shadowed by the second ``main``
    defined later in the module, so it never runs under
    ``if __name__ == "__main__"`` — kept intact for reference; consider
    deleting it.
    """
    # Session state persists values across Streamlit reruns.
    if "auto_repeat" not in st.session_state:
        st.session_state.auto_repeat = "On"
    if "current_index" not in st.session_state:
        st.session_state.current_index = 0  # default index (unused in this body)
    # Load the history of clicks.
    history = load_history()
    # One button per URL; the URL doubles as a unique widget key so
    # Streamlit does not raise duplicate-widget-ID errors.
    for url, name, emoji in zip(urls, url_names, url_emojis):
        if st.button(f"{emoji} {name}", key=url):
            # NOTE(review): Streamlit strips <script> tags, so this probably
            # never opens a tab — confirm; st.markdown is the documented API
            # for unsafe_allow_html (st.write's kwarg is not supported).
            st.markdown('<script>window.open("'+url+'", "_blank");</script>', unsafe_allow_html=True)
            # .get() guards against URLs missing from an older saved file.
            history[url] = history.get(url, 0) + 1
            save_history(history)
        # Show the click count below its button.
        st.write(f"Clicked: {history.get(url, 0)} times")
def get_url_emojis(url_names, mapping=None):
    """Map each URL name to an emoji.

    Args:
        url_names: iterable of space names (last URL path segment).
        mapping: optional {keyword: emoji} dict; defaults to the
            module-level ``emoji_mapping``. The first keyword (in dict
            insertion order) found as a substring of a name wins.

    Returns:
        list[str]: one emoji per name; "π" when no keyword matches.
    """
    if mapping is None:
        mapping = emoji_mapping
    return [
        next((icon for keyword, icon in mapping.items() if keyword in name), "π")
        for name in url_names
    ]
def load_history():
    """Return {url: click_count} for every URL in ``urls``.

    Starts from zero counts for all known URLs, then overlays whatever
    was persisted in click_history.txt. This keeps URLs added after the
    file was last written from raising KeyError in the callers, and a
    missing or corrupt file simply yields all-zero counts.
    """
    history = {url: 0 for url in urls}
    try:
        with open("click_history.txt", "r") as f:
            history.update(json.load(f))
    except (FileNotFoundError, json.JSONDecodeError):
        pass
    return history
def save_history(history):
    """Serialize the click-count dict to click_history.txt as JSON."""
    payload = json.dumps(history)
    with open("click_history.txt", "w") as target:
        target.write(payload)
def main():
    """Render the click-sorted launcher grid, a Bokeh click-count chart,
    and a 10-second auto-reload countdown.

    This definition shadows the earlier ``main`` and is the one that runs.
    """
    if "auto_repeat" not in st.session_state:
        st.session_state.auto_repeat = "On"
    history = load_history()
    # Bundle url/name/emoji/count per space for sorting and display.
    # .get() guards against URLs missing from an older click_history.txt.
    url_data = [{'url': url, 'name': name, 'emoji': emoji, 'clicks': history.get(url, 0)}
                for url, name, emoji in zip(urls, url_names, get_url_emojis(url_names))]
    # Sort alphabetically on initial load if no clicks
    if all(data['clicks'] == 0 for data in url_data):
        url_data.sort(key=lambda x: x['name'].lower())
    else:
        # Sort by clicks (descending) and then alphabetically for ties
        url_data.sort(key=lambda x: (-x['clicks'], x['name'].lower()))
    # Display buttons round-robin across up to 4 columns.
    # max(1, ...) avoids st.columns(0) crashing on an empty URL list.
    num_cols = max(1, min(4, len(url_data)))
    cols = st.columns(num_cols)
    for i, data in enumerate(url_data):
        col = cols[i % num_cols]
        with col:
            if st.button(f"{data['emoji']} {data['name']}", key=f"btn_{data['url']}"):
                # NOTE(review): Streamlit sanitizes <script> tags, so this
                # probably never opens the tab — confirm; st.link_button is
                # the supported alternative. st.markdown is the documented
                # API for unsafe_allow_html.
                st.markdown(f'<script>window.open("{data["url"]}", "_blank");</script>',
                            unsafe_allow_html=True)
                history[data['url']] = history.get(data['url'], 0) + 1
                save_history(history)
                # st.experimental_rerun() was deprecated/removed; use
                # st.rerun(), consistent with the timer branch below.
                st.rerun()  # Rerun to update sorting
            st.write(f"Clicked: {data['clicks']} times")
    # Bar chart of click counts, shown only once something was clicked.
    non_zero_data = [d for d in url_data if d['clicks'] > 0]
    if non_zero_data:
        source = ColumnDataSource(data=dict(
            urls=[d['name'] for d in non_zero_data],
            counts=[d['clicks'] for d in non_zero_data]
        ))
        p = figure(x_range=[d['name'] for d in non_zero_data],
                   height=350,
                   title="Click Counts per URL",
                   toolbar_location=None)
        p.vbar(x='urls', top='counts', width=0.9, source=source)
        p.xaxis.major_label_orientation = 1.2
        st.bokeh_chart(p)
    # Timer logic: count down 10s, then rerun to refresh counts/sorting.
    if st.session_state.auto_repeat == "On":
        timer_placeholder = st.empty()
        for i in range(10, 0, -1):
            timer_placeholder.text(f"Reloading in {i} seconds...")
            time.sleep(1)
        st.rerun()
# Script entry point. The stray " |" after main() (a copy/paste artifact
# from the hosting page's line-number gutter) was a SyntaxError and is removed.
if __name__ == "__main__":
    main()