Update requirements.txt
Browse files- requirements.txt +14 -14
requirements.txt
CHANGED
@@ -1,69 +1,69 @@
|
|
1 |
-
streamlit
|
2 |
# π¨ Build interactive web apps: st.title("Hello"), st.button("Click"), st.image("pic.png")
|
3 |
# π Display data: st.dataframe(df), st.plotly_chart(fig), st.write("Text")
|
4 |
# π Create UI: st.slider("Range", 0, 100), st.selectbox("Choose", options), st.file_uploader("Upload")
|
5 |
|
6 |
-
asyncio  # WARNING: asyncio ships with the Python standard library; the PyPI package of this name is an obsolete 3.3 backport — this line should be removed
|
7 |
# π Run async tasks: await asyncio.sleep(1), asyncio.run(main()), asyncio.gather(*tasks)
|
8 |
# π Handle coroutines: async def fetch(), await coro(), asyncio.create_task(func())
|
9 |
# β³ Manage event loops: loop = asyncio.get_event_loop(), loop.run_until_complete(), asyncio.ensure_future()
|
10 |
|
11 |
-
websockets
|
12 |
# π WebSocket server: async with websockets.serve(handler, "localhost", 8765): await asyncio.Future()
|
13 |
# π‘ Client connect: async with websockets.connect("ws://localhost:8765") as ws: await ws.send("Hi")
|
14 |
# π Real-time comms: async for msg in ws: print(msg), await ws.recv(), ws.send("Response")
|
15 |
|
16 |
-
pillow
|
17 |
# πΌοΈ Load images: img = Image.open("file.png"), img.resize((100, 100)), img.save("out.png")
|
18 |
# π¨ Process images: img.convert("RGB"), ImageDraw.Draw(img).text((10, 10), "Hello"), img.rotate(90)
|
19 |
# πΈ Manipulate: img.crop((0, 0, 50, 50)), img.filter(ImageFilter.BLUR), img.thumbnail((50, 50))
|
20 |
|
21 |
-
edge_tts
|
22 |
# ποΈ Text-to-speech: comm = edge_tts.Communicate("Hello", "en-US-AriaNeural"), await comm.save("out.mp3")
|
23 |
# π Customize voice: edge_tts.Communicate(text, voice="en-GB-SoniaNeural", rate="+10%"), await comm.save()
|
24 |
# π’ Generate audio: async def speak(text): await edge_tts.Communicate(text, "en-US-GuyNeural").save("file.mp3")
|
25 |
|
26 |
-
audio-recorder-streamlit
|
27 |
# π€ Record audio: audio_bytes = audio_recorder(), st.audio(audio_bytes, format="audio/wav")
|
28 |
# π Capture voice: if audio_bytes: process_audio(audio_bytes), st.write("Recording...")
|
29 |
# π₯ Save recording: with open("recording.wav", "wb") as f: f.write(audio_recorder())
|
30 |
|
31 |
-
nest_asyncio
|
32 |
# π§ Fix nested loops: nest_asyncio.apply(), asyncio.run(main()), loop.run_until_complete(coro())
|
33 |
# π Enable async in sync: nest_asyncio.apply() before asyncio.run() in Jupyter/Streamlit
|
34 |
# β³ Patch event loop: import nest_asyncio; nest_asyncio.apply() for multiple event loops
|
35 |
|
36 |
-
streamlit-paste-button
|
37 |
# π Paste images: result = paste_image_button("Paste"), if result.image_data: st.image(result.image_data)
|
38 |
# βοΈ Capture clipboard: paste_data = paste_image_button("Click"), st.write(paste_data.text_data)
|
39 |
# πΌοΈ Process paste: if paste_image_button("Paste").image_data: save_image(result.image_data)
|
40 |
|
41 |
-
pypdf2  # NOTE: PyPDF2 is deprecated/unmaintained; its successor 'pypdf' provides the same PdfReader API used below
|
42 |
# π Read PDFs: reader = PdfReader("file.pdf"), text = reader.pages[0].extract_text(), num_pages = len(reader.pages)
|
43 |
# π Extract text: for page in PdfReader("doc.pdf").pages: st.write(page.extract_text())
|
44 |
# π Parse PDF: pdf = PdfReader(open("file.pdf", "rb")), st.write(pdf.metadata), page_count = len(pdf.pages)
|
45 |
|
46 |
-
anthropic
|
47 |
# π€ AI responses: client = anthropic.Anthropic(api_key="key"), resp = client.messages.create(model="claude-3", messages=[...])
|
48 |
# π’ Claude chat: msg = client.messages.create(model="claude-3-sonnet", max_tokens=1000, messages=[{"role": "user", "content": "Hi"}])
|
49 |
# π Query AI: response = client.messages.create(model="claude-3-haiku-20240307", max_tokens=512, messages=[{"role": "user", "content": "Hello"}])
|
50 |
|
51 |
-
openai
|
52 |
# π§ GPT queries: client = openai.OpenAI(api_key="key"), resp = client.chat.completions.create(model="gpt-4", messages=[...])
|
53 |
# π Generate text: completion = client.completions.create(model="gpt-3.5-turbo-instruct", prompt="Write a story", max_tokens=100)
|
54 |
# π AI chat: chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
|
55 |
|
56 |
-
gradio-client
|
57 |
# π Call Gradio APIs: client = Client("user/repo"), result = client.predict("input", api_name="/predict")
|
58 |
# π‘ Remote inference: prediction = Client("awacke1/Arxiv-Paper-Search").predict("query", 10, "search", "model")
|
59 |
# π Fetch results: resp = client.predict(param1, param2, api_name="/endpoint"), st.write(resp)
|
60 |
|
61 |
-
python-dotenv
|
62 |
# π Load env vars: load_dotenv(), api_key = os.getenv("API_KEY"), st.write(os.getenv("SECRET"))
|
63 |
# π Config from .env: from dotenv import load_dotenv; load_dotenv(); db_url = os.getenv("DB_URL")
|
64 |
# π Secure secrets: load_dotenv(".env"), token = os.getenv("TOKEN"), client = SomeClient(token)
|
65 |
|
66 |
-
streamlit-marquee
|
67 |
# π₯ Scrolling text: streamlit_marquee(content="Hello", background="#000", color="#FFF", fontSize="20px")
|
68 |
# π£ Dynamic marquee: streamlit_marquee(content="News", animationDuration="10s", width="100%")
|
69 |
# π Styled ticker: streamlit_marquee(content="Update", background="#1E1E1E", lineHeight="30px")
|
|
|
1 |
+
streamlit
|
2 |
# π¨ Build interactive web apps: st.title("Hello"), st.button("Click"), st.image("pic.png")
|
3 |
# π Display data: st.dataframe(df), st.plotly_chart(fig), st.write("Text")
|
4 |
# π Create UI: st.slider("Range", 0, 100), st.selectbox("Choose", options), st.file_uploader("Upload")
|
5 |
|
6 |
+
asyncio  # WARNING: asyncio ships with the Python standard library; the PyPI package of this name is an obsolete 3.3 backport — this line should be removed
|
7 |
# π Run async tasks: await asyncio.sleep(1), asyncio.run(main()), asyncio.gather(*tasks)
|
8 |
# π Handle coroutines: async def fetch(), await coro(), asyncio.create_task(func())
|
9 |
# β³ Manage event loops: loop = asyncio.get_event_loop(), loop.run_until_complete(), asyncio.ensure_future()
|
10 |
|
11 |
+
websockets
|
12 |
# π WebSocket server: async with websockets.serve(handler, "localhost", 8765): await asyncio.Future()
|
13 |
# π‘ Client connect: async with websockets.connect("ws://localhost:8765") as ws: await ws.send("Hi")
|
14 |
# π Real-time comms: async for msg in ws: print(msg), await ws.recv(), ws.send("Response")
|
15 |
|
16 |
+
pillow
|
17 |
# πΌοΈ Load images: img = Image.open("file.png"), img.resize((100, 100)), img.save("out.png")
|
18 |
# π¨ Process images: img.convert("RGB"), ImageDraw.Draw(img).text((10, 10), "Hello"), img.rotate(90)
|
19 |
# πΈ Manipulate: img.crop((0, 0, 50, 50)), img.filter(ImageFilter.BLUR), img.thumbnail((50, 50))
|
20 |
|
21 |
+
edge_tts
|
22 |
# ποΈ Text-to-speech: comm = edge_tts.Communicate("Hello", "en-US-AriaNeural"), await comm.save("out.mp3")
|
23 |
# π Customize voice: edge_tts.Communicate(text, voice="en-GB-SoniaNeural", rate="+10%"), await comm.save()
|
24 |
# π’ Generate audio: async def speak(text): await edge_tts.Communicate(text, "en-US-GuyNeural").save("file.mp3")
|
25 |
|
26 |
+
audio-recorder-streamlit
|
27 |
# π€ Record audio: audio_bytes = audio_recorder(), st.audio(audio_bytes, format="audio/wav")
|
28 |
# π Capture voice: if audio_bytes: process_audio(audio_bytes), st.write("Recording...")
|
29 |
# π₯ Save recording: with open("recording.wav", "wb") as f: f.write(audio_recorder())
|
30 |
|
31 |
+
nest_asyncio
|
32 |
# π§ Fix nested loops: nest_asyncio.apply(), asyncio.run(main()), loop.run_until_complete(coro())
|
33 |
# π Enable async in sync: nest_asyncio.apply() before asyncio.run() in Jupyter/Streamlit
|
34 |
# β³ Patch event loop: import nest_asyncio; nest_asyncio.apply() for multiple event loops
|
35 |
|
36 |
+
streamlit-paste-button
|
37 |
# π Paste images: result = paste_image_button("Paste"), if result.image_data: st.image(result.image_data)
|
38 |
# βοΈ Capture clipboard: paste_data = paste_image_button("Click"), st.write(paste_data.text_data)
|
39 |
# πΌοΈ Process paste: if paste_image_button("Paste").image_data: save_image(result.image_data)
|
40 |
|
41 |
+
pypdf2  # NOTE: PyPDF2 is deprecated/unmaintained; its successor 'pypdf' provides the same PdfReader API used below
|
42 |
# π Read PDFs: reader = PdfReader("file.pdf"), text = reader.pages[0].extract_text(), num_pages = len(reader.pages)
|
43 |
# π Extract text: for page in PdfReader("doc.pdf").pages: st.write(page.extract_text())
|
44 |
# π Parse PDF: pdf = PdfReader(open("file.pdf", "rb")), st.write(pdf.metadata), page_count = len(pdf.pages)
|
45 |
|
46 |
+
anthropic
|
47 |
# π€ AI responses: client = anthropic.Anthropic(api_key="key"), resp = client.messages.create(model="claude-3", messages=[...])
|
48 |
# π’ Claude chat: msg = client.messages.create(model="claude-3-sonnet", max_tokens=1000, messages=[{"role": "user", "content": "Hi"}])
|
49 |
# π Query AI: response = client.messages.create(model="claude-3-haiku-20240307", max_tokens=512, messages=[{"role": "user", "content": "Hello"}])
|
50 |
|
51 |
+
openai
|
52 |
# π§ GPT queries: client = openai.OpenAI(api_key="key"), resp = client.chat.completions.create(model="gpt-4", messages=[...])
|
53 |
# π Generate text: completion = client.completions.create(model="gpt-3.5-turbo-instruct", prompt="Write a story", max_tokens=100)
|
54 |
# π AI chat: chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
|
55 |
|
56 |
+
gradio-client
|
57 |
# π Call Gradio APIs: client = Client("user/repo"), result = client.predict("input", api_name="/predict")
|
58 |
# π‘ Remote inference: prediction = Client("awacke1/Arxiv-Paper-Search").predict("query", 10, "search", "model")
|
59 |
# π Fetch results: resp = client.predict(param1, param2, api_name="/endpoint"), st.write(resp)
|
60 |
|
61 |
+
python-dotenv
|
62 |
# π Load env vars: load_dotenv(), api_key = os.getenv("API_KEY"), st.write(os.getenv("SECRET"))
|
63 |
# π Config from .env: from dotenv import load_dotenv; load_dotenv(); db_url = os.getenv("DB_URL")
|
64 |
# π Secure secrets: load_dotenv(".env"), token = os.getenv("TOKEN"), client = SomeClient(token)
|
65 |
|
66 |
+
streamlit-marquee
|
67 |
# π₯ Scrolling text: streamlit_marquee(content="Hello", background="#000", color="#FFF", fontSize="20px")
|
68 |
# π£ Dynamic marquee: streamlit_marquee(content="News", animationDuration="10s", width="100%")
|
69 |
# π Styled ticker: streamlit_marquee(content="Update", background="#1E1E1E", lineHeight="30px")
|