awacke1 commited on
Commit
22099f6
·
verified ·
1 Parent(s): c8875fd

Update requirements.txt

Browse files
Files changed (1) hide show
  1. requirements.txt +14 -14
requirements.txt CHANGED
@@ -1,69 +1,69 @@
1
- streamlit==1.31.0
2
  # 🎨 Build interactive web apps: st.title("Hello"), st.button("Click"), st.image("pic.png")
3
  # πŸ“Š Display data: st.dataframe(df), st.plotly_chart(fig), st.write("Text")
4
  # πŸš€ Create UI: st.slider("Range", 0, 100), st.selectbox("Choose", options), st.file_uploader("Upload")
5
 
6
- asyncio==3.4.3
7
  # πŸ”„ Run async tasks: await asyncio.sleep(1), asyncio.run(main()), asyncio.gather(*tasks)
8
  # 🌐 Handle coroutines: async def fetch(), await coro(), asyncio.create_task(func())
9
  # ⏳ Manage event loops: loop = asyncio.get_event_loop(), loop.run_until_complete(), asyncio.ensure_future()
10
 
11
- websockets==12.0
12
  # 🌐 WebSocket server: async with websockets.serve(handler, "localhost", 8765): await asyncio.Future()
13
  # πŸ“‘ Client connect: async with websockets.connect("ws://localhost:8765") as ws: await ws.send("Hi")
14
  # πŸ”Š Real-time comms: async for msg in ws: print(msg), await ws.recv(), ws.send("Response")
15
 
16
- pillow==10.2.0
17
  # πŸ–ΌοΈ Load images: img = Image.open("file.png"), img.resize((100, 100)), img.save("out.png")
18
  # 🎨 Process images: img.convert("RGB"), ImageDraw.Draw(img).text((10, 10), "Hello"), img.rotate(90)
19
  # πŸ“Έ Manipulate: img.crop((0, 0, 50, 50)), img.filter(ImageFilter.BLUR), img.thumbnail((50, 50))
20
 
21
- edge_tts==6.1.12
22
  # πŸŽ™οΈ Text-to-speech: comm = edge_tts.Communicate("Hello", "en-US-AriaNeural"), await comm.save("out.mp3")
23
  # πŸ”Š Customize voice: edge_tts.Communicate(text, voice="en-GB-SoniaNeural", rate="+10%"), await comm.save()
24
  # πŸ“’ Generate audio: async def speak(text): await edge_tts.Communicate(text, "en-US-GuyNeural").save("file.mp3")
25
 
26
- audio-recorder-streamlit==0.0.8
27
  # 🎀 Record audio: audio_bytes = audio_recorder(), st.audio(audio_bytes, format="audio/wav")
28
  # πŸ”‰ Capture voice: if audio_bytes: process_audio(audio_bytes), st.write("Recording...")
29
  # πŸ“₯ Save recording: with open("recording.wav", "wb") as f: f.write(audio_recorder())
30
 
31
- nest_asyncio==1.6.0
32
  # πŸ”§ Fix nested loops: nest_asyncio.apply(), asyncio.run(main()), loop.run_until_complete(coro())
33
  # 🌐 Enable async in sync: nest_asyncio.apply() before asyncio.run() in Jupyter/Streamlit
34
  # ⏳ Patch event loop: import nest_asyncio; nest_asyncio.apply() for multiple event loops
35
 
36
- streamlit-paste-button==0.1.1
37
  # πŸ“‹ Paste images: result = paste_image_button("Paste"), if result.image_data: st.image(result.image_data)
38
  # βœ‚οΈ Capture clipboard: paste_data = paste_image_button("Click"), st.write(paste_data.text_data)
39
  # πŸ–ΌοΈ Process paste: if paste_image_button("Paste").image_data: save_image(result.image_data)
40
 
41
- pypdf2==3.0.1
42
  # πŸ“œ Read PDFs: reader = PdfReader("file.pdf"), text = reader.pages[0].extract_text(), num_pages = len(reader.pages)
43
  # πŸ“ Extract text: for page in PdfReader("doc.pdf").pages: st.write(page.extract_text())
44
  # πŸ“š Parse PDF: pdf = PdfReader(open("file.pdf", "rb")), st.write(pdf.metadata), page_count = len(pdf.pages)
45
 
46
- anthropic==0.34.2
47
  # πŸ€– AI responses: client = anthropic.Anthropic(api_key="key"), resp = client.messages.create(model="claude-3", messages=[...])
48
  # πŸ“’ Claude chat: msg = client.messages.create(model="claude-3-sonnet", max_tokens=1000, messages=[{"role": "user", "content": "Hi"}])
49
  # πŸ” Query AI: response = client.completions.create(model="claude-2", prompt="Hello", max_tokens_to_sample=512)
50
 
51
- openai==1.45.0
52
  # 🧠 GPT queries: client = openai.OpenAI(api_key="key"), resp = client.chat.completions.create(model="gpt-4", messages=[...])
53
  # πŸ“ Generate text: completion = openai.Completion.create(model="text-davinci-003", prompt="Write a story", max_tokens=100)
54
  # πŸ”Š AI chat: chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
55
 
56
- gradio-client==1.3.0
57
  # 🌐 Call Gradio APIs: client = Client("user/repo"), result = client.predict("input", api_name="/predict")
58
  # πŸ“‘ Remote inference: prediction = Client("awacke1/Arxiv-Paper-Search").predict("query", 10, "search", "model")
59
  # πŸ” Fetch results: resp = client.predict(param1, param2, api_name="/endpoint"), st.write(resp)
60
 
61
- python-dotenv==1.0.1
62
  # πŸ”‘ Load env vars: load_dotenv(), api_key = os.getenv("API_KEY"), st.write(os.getenv("SECRET"))
63
  # 🌍 Config from .env: from dotenv import load_dotenv; load_dotenv(); db_url = os.getenv("DB_URL")
64
  # πŸ“‹ Secure secrets: load_dotenv(".env"), token = os.getenv("TOKEN"), client = SomeClient(token)
65
 
66
- streamlit-marquee==0.1.0
67
  # πŸŽ₯ Scrolling text: streamlit_marquee(content="Hello", background="#000", color="#FFF", font_size="20px")
68
  # πŸ“£ Dynamic marquee: streamlit_marquee(content="News", animationDuration="10s", width="100%")
69
  # 🌈 Styled ticker: streamlit_marquee(content="Update", background="#1E1E1E", lineHeight="30px")
 
1
+ streamlit
2
  # 🎨 Build interactive web apps: st.title("Hello"), st.button("Click"), st.image("pic.png")
3
  # πŸ“Š Display data: st.dataframe(df), st.plotly_chart(fig), st.write("Text")
4
  # πŸš€ Create UI: st.slider("Range", 0, 100), st.selectbox("Choose", options), st.file_uploader("Upload")
5
 
6
+ asyncio
7
  # πŸ”„ Run async tasks: await asyncio.sleep(1), asyncio.run(main()), asyncio.gather(*tasks)
8
  # 🌐 Handle coroutines: async def fetch(), await coro(), asyncio.create_task(func())
9
  # ⏳ Manage event loops: loop = asyncio.get_event_loop(), loop.run_until_complete(), asyncio.ensure_future()
10
 
11
+ websockets
12
  # 🌐 WebSocket server: async with websockets.serve(handler, "localhost", 8765): await asyncio.Future()
13
  # πŸ“‘ Client connect: async with websockets.connect("ws://localhost:8765") as ws: await ws.send("Hi")
14
  # πŸ”Š Real-time comms: async for msg in ws: print(msg), await ws.recv(), ws.send("Response")
15
 
16
+ pillow
17
  # πŸ–ΌοΈ Load images: img = Image.open("file.png"), img.resize((100, 100)), img.save("out.png")
18
  # 🎨 Process images: img.convert("RGB"), ImageDraw.Draw(img).text((10, 10), "Hello"), img.rotate(90)
19
  # πŸ“Έ Manipulate: img.crop((0, 0, 50, 50)), img.filter(ImageFilter.BLUR), img.thumbnail((50, 50))
20
 
21
+ edge_tts
22
  # πŸŽ™οΈ Text-to-speech: comm = edge_tts.Communicate("Hello", "en-US-AriaNeural"), await comm.save("out.mp3")
23
  # πŸ”Š Customize voice: edge_tts.Communicate(text, voice="en-GB-SoniaNeural", rate="+10%"), await comm.save()
24
  # πŸ“’ Generate audio: async def speak(text): await edge_tts.Communicate(text, "en-US-GuyNeural").save("file.mp3")
25
 
26
+ audio-recorder-streamlit
27
  # 🎀 Record audio: audio_bytes = audio_recorder(), st.audio(audio_bytes, format="audio/wav")
28
  # πŸ”‰ Capture voice: if audio_bytes: process_audio(audio_bytes), st.write("Recording...")
29
  # πŸ“₯ Save recording: with open("recording.wav", "wb") as f: f.write(audio_recorder())
30
 
31
+ nest_asyncio
32
  # πŸ”§ Fix nested loops: nest_asyncio.apply(), asyncio.run(main()), loop.run_until_complete(coro())
33
  # 🌐 Enable async in sync: nest_asyncio.apply() before asyncio.run() in Jupyter/Streamlit
34
  # ⏳ Patch event loop: import nest_asyncio; nest_asyncio.apply() for multiple event loops
35
 
36
+ streamlit-paste-button
37
  # πŸ“‹ Paste images: result = paste_image_button("Paste"), if result.image_data: st.image(result.image_data)
38
  # βœ‚οΈ Capture clipboard: paste_data = paste_image_button("Click"), st.write(paste_data.text_data)
39
  # πŸ–ΌοΈ Process paste: if paste_image_button("Paste").image_data: save_image(result.image_data)
40
 
41
+ pypdf2
42
  # πŸ“œ Read PDFs: reader = PdfReader("file.pdf"), text = reader.pages[0].extract_text(), num_pages = len(reader.pages)
43
  # πŸ“ Extract text: for page in PdfReader("doc.pdf").pages: st.write(page.extract_text())
44
  # πŸ“š Parse PDF: pdf = PdfReader(open("file.pdf", "rb")), st.write(pdf.metadata), page_count = len(pdf.pages)
45
 
46
+ anthropic
47
  # πŸ€– AI responses: client = anthropic.Anthropic(api_key="key"), resp = client.messages.create(model="claude-3", messages=[...])
48
  # πŸ“’ Claude chat: msg = client.messages.create(model="claude-3-sonnet", max_tokens=1000, messages=[{"role": "user", "content": "Hi"}])
49
  # πŸ” Query AI: response = client.completions.create(model="claude-2", prompt="Hello", max_tokens_to_sample=512)
50
 
51
+ openai
52
  # 🧠 GPT queries: client = openai.OpenAI(api_key="key"), resp = client.chat.completions.create(model="gpt-4", messages=[...])
53
  # πŸ“ Generate text: completion = openai.Completion.create(model="text-davinci-003", prompt="Write a story", max_tokens=100)
54
  # πŸ”Š AI chat: chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
55
 
56
+ gradio-client
57
  # 🌐 Call Gradio APIs: client = Client("user/repo"), result = client.predict("input", api_name="/predict")
58
  # πŸ“‘ Remote inference: prediction = Client("awacke1/Arxiv-Paper-Search").predict("query", 10, "search", "model")
59
  # πŸ” Fetch results: resp = client.predict(param1, param2, api_name="/endpoint"), st.write(resp)
60
 
61
+ python-dotenv
62
  # πŸ”‘ Load env vars: load_dotenv(), api_key = os.getenv("API_KEY"), st.write(os.getenv("SECRET"))
63
  # 🌍 Config from .env: from dotenv import load_dotenv; load_dotenv(); db_url = os.getenv("DB_URL")
64
  # πŸ“‹ Secure secrets: load_dotenv(".env"), token = os.getenv("TOKEN"), client = SomeClient(token)
65
 
66
+ streamlit-marquee
67
  # πŸŽ₯ Scrolling text: streamlit_marquee(content="Hello", background="#000", color="#FFF", font_size="20px")
68
  # πŸ“£ Dynamic marquee: streamlit_marquee(content="News", animationDuration="10s", width="100%")
69
  # 🌈 Styled ticker: streamlit_marquee(content="Update", background="#1E1E1E", lineHeight="30px")