"""
**Streamlit Space – minimal version (no explicit caching)**

Fixes the `StreamlitSetPageConfigMustBeFirstCommandError` **and** the
previous permission issues by:

1. Setting `HOME`, `HF_HOME`, and `HF_CACHE_DIR` **before** we import
   Streamlit – that way Streamlit creates `~/.streamlit` in a writable
   place instead of `/.streamlit`.
2. Moving the call to `st.set_page_config()` to be the **very first
   Streamlit command**.

You can re-add `@st.cache_resource` once everything boots.
"""
import os
import tempfile
# ----------------------------------------------------------------------------
# Writable environment — must be set *before* importing streamlit
# ----------------------------------------------------------------------------
HF_TMP = tempfile.mkdtemp(prefix="hf_") # guaranteed‑writable temp dir
os.environ["HOME"] = HF_TMP # for ~/.streamlit
os.environ["HF_HOME"] = HF_TMP
os.environ["HF_CACHE_DIR"] = HF_TMP
# ----------------------------------------------------------------------------
import streamlit as st
from llama_cpp import Llama
# ----- Streamlit page config (must be first Streamlit command) -------------
st.set_page_config(page_title="Sanskrit Transliteration", page_icon="📜")
# ----------------------------------------------------------------------------
# Model loader (no Streamlit cache for now)
# ----------------------------------------------------------------------------
llm = None # global singleton
def get_model():
    global llm
    if llm is None:
        with st.spinner("Downloading & loading model – first run takes a bit…"):
            llm = Llama.from_pretrained(
                repo_id="13Aluminium/Sanskrit_transliteration_Qwen3",
                filename="unsloth.Q4_K_M.gguf",
                cache_dir=os.environ["HF_CACHE_DIR"],
            )
    return llm
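# Sketch: the cached variant mentioned in the docstring. Once the Space boots
# cleanly, this decorated loader can replace the manual singleton above
# (st.cache_resource keeps one instance per process and shows its own spinner):
#
# @st.cache_resource(show_spinner="Downloading & loading model – first run takes a bit…")
# def get_model() -> Llama:
#     return Llama.from_pretrained(
#         repo_id="13Aluminium/Sanskrit_transliteration_Qwen3",
#         filename="unsloth.Q4_K_M.gguf",
#         cache_dir=os.environ["HF_CACHE_DIR"],
#     )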
# ----------------------------------------------------------------------------
# Inference helper
# ----------------------------------------------------------------------------
def transliterate(text: str, *, temperature: float = 0.1, max_tokens: int = 200) -> str:
    prompt = f"""Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.

### Instruction:
Transliterate Sanskrit Shlok to Latin

### Input:
{text}

### Response:
"""
    model = get_model()
    out = model(prompt, max_tokens=max_tokens, temperature=temperature, echo=False)
    return out["choices"][0]["text"].strip()
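# Illustrative usage (output is model-dependent and will vary):
#   transliterate("धर्मक्षेत्रे कुरुक्षेत्रे")  # -> e.g. "dharmakṣetre kurukṣetre"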
# ----------------------------------------------------------------------------
# UI
# ----------------------------------------------------------------------------
st.title("📜 Sanskrit → Latin Transliteration (Qwen3)")
sample = (
    "भूरीणि भूरिकर्माणि श्रोतव्यानि विभागशः ।\n"
    "अतः साधोऽत्र यत्सारं समुद्धृत्य मनीषया ।\n"
    "ब्रूहि नः श्रद्दधानानां येनात्मा संप्रसीदति ॥ ११ ॥"
)
sanskrit_text = st.text_area("Sanskrit text:", value=sample, height=180)
temp = st.slider("Temperature", 0.0, 1.0, 0.1, 0.01)
max_tokens = st.number_input("Max tokens", 50, 512, 200, 1)
if st.button("🔤 Transliterate"):
    if not sanskrit_text.strip():
        st.warning("Please enter some Sanskrit text first.")
    else:
        with st.spinner("Transliterating…"):
            result = transliterate(sanskrit_text, temperature=temp, max_tokens=max_tokens)
        st.text(result)
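# Design note: st.text() renders the result as preformatted plain text, which
# keeps the Latin diacritics and line breaks exactly as the model emitted them;
# st.write()/st.markdown() would try to interpret the string as Markdown.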
# ----------------------------------------------------------------------------
# requirements.txt – keep the same:
# streamlit>=1.36
# llama-cpp-python>=0.2.11
# huggingface_hub>=0.24
# ----------------------------------------------------------------------------