File size: 4,565 Bytes
22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd 22099f6 c8875fd |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 |
streamlit
# 🎨 Build interactive web apps: st.title("Hello"), st.button("Click"), st.image("pic.png")
# 📊 Display data: st.dataframe(df), st.plotly_chart(fig), st.write("Text")
# 🎚️ Create UI: st.slider("Range", 0, 100), st.selectbox("Choose", options), st.file_uploader("Upload")
asyncio
# 🚀 Run async tasks: await asyncio.sleep(1), asyncio.run(main()), asyncio.gather(*tasks)
# 🔄 Handle coroutines: async def fetch(), await coro(), asyncio.create_task(func())
# ⏳ Manage event loops: loop = asyncio.get_event_loop(), loop.run_until_complete(), asyncio.ensure_future()
websockets
# 🌐 WebSocket server: async with websockets.serve(handler, "localhost", 8765): await asyncio.Future()
# 📡 Client connect: async with websockets.connect("ws://localhost:8765") as ws: await ws.send("Hi")
# 🔄 Real-time comms: async for msg in ws: print(msg), await ws.recv(), ws.send("Response")
pillow
# 🖼️ Load images: img = Image.open("file.png"), img.resize((100, 100)), img.save("out.png")
# 🎨 Process images: img.convert("RGB"), ImageDraw.Draw(img).text((10, 10), "Hello"), img.rotate(90)
# 📸 Manipulate: img.crop((0, 0, 50, 50)), img.filter(ImageFilter.BLUR), img.thumbnail((50, 50))
edge_tts
# 🎙️ Text-to-speech: comm = edge_tts.Communicate("Hello", "en-US-AriaNeural"), await comm.save("out.mp3")
# 🔊 Customize voice: edge_tts.Communicate(text, voice="en-GB-SoniaNeural", rate="+10%"), await comm.save()
# 📢 Generate audio: async def speak(text): await edge_tts.Communicate(text, "en-US-GuyNeural").save("file.mp3")
audio-recorder-streamlit
# 🎤 Record audio: audio_bytes = audio_recorder(), st.audio(audio_bytes, format="audio/wav")
# 🔊 Capture voice: if audio_bytes: process_audio(audio_bytes), st.write("Recording...")
# 📥 Save recording: with open("recording.wav", "wb") as f: f.write(audio_recorder())
nest_asyncio
# 🔧 Fix nested loops: nest_asyncio.apply(), asyncio.run(main()), loop.run_until_complete(coro())
# 🔄 Enable async in sync: nest_asyncio.apply() before asyncio.run() in
# Jupyter/Streamlit
# ⏳ Patch event loop: import nest_asyncio; nest_asyncio.apply() for multiple event loops
streamlit-paste-button
# 📋 Paste images: result = paste_image_button("Paste"), if result.image_data: st.image(result.image_data)
# ✂️ Capture clipboard: paste_data = paste_image_button("Click"), st.write(paste_data.text_data)
# 🖼️ Process paste: if paste_image_button("Paste").image_data: save_image(result.image_data)
pypdf2
# 📄 Read PDFs: reader = PdfReader("file.pdf"), text = reader.pages[0].extract_text(), num_pages = len(reader.pages)
# 🔍 Extract text: for page in PdfReader("doc.pdf").pages: st.write(page.extract_text())
# 📑 Parse PDF: pdf = PdfReader(open("file.pdf", "rb")), st.write(pdf.metadata), page_count = len(pdf.pages)
anthropic
# 🤖 AI responses: client = anthropic.Anthropic(api_key="key"), resp = client.messages.create(model="claude-3", messages=[...])
# 📢 Claude chat: msg = client.messages.create(model="claude-3-sonnet", max_tokens=1000, messages=[{"role": "user", "content": "Hi"}])
# 💬 Query AI: response = client.completions.create(model="claude-2", prompt="Hello", max_tokens_to_sample=512)
openai
# 🧠 GPT queries: client = openai.OpenAI(api_key="key"), resp = client.chat.completions.create(model="gpt-4", messages=[...])
# 📝 Generate text: completion = openai.Completion.create(model="text-davinci-003", prompt="Write a story", max_tokens=100)
# 💭 AI chat: chat = client.chat.completions.create(model="gpt-3.5-turbo", messages=[{"role": "user", "content": "Hello"}])
gradio-client
# 🌐 Call Gradio APIs: client = Client("user/repo"), result = client.predict("input", api_name="/predict")
# 📡 Remote inference: prediction = Client("awacke1/Arxiv-Paper-Search").predict("query", 10, "search", "model")
# 📊 Fetch results: resp = client.predict(param1, param2, api_name="/endpoint"), st.write(resp)
python-dotenv
# 🔐 Load env vars: load_dotenv(), api_key = os.getenv("API_KEY"), st.write(os.getenv("SECRET"))
# 📄 Config from .env: from dotenv import
# load_dotenv; load_dotenv(); db_url = os.getenv("DB_URL")
# 🔒 Secure secrets: load_dotenv(".env"), token = os.getenv("TOKEN"), client = SomeClient(token)
streamlit-marquee
# 🔥 Scrolling text: streamlit_marquee(content="Hello", background="#000", color="#FFF", font-size="20px")
# 📣 Dynamic marquee: streamlit_marquee(content="News", animationDuration="10s", width="100%")
# 📊 Styled ticker: streamlit_marquee(content="Update", background="#1E1E1E", lineHeight="30px")